2024-11-28 07:21:09,577 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-28 07:21:09,594 main DEBUG Took 0.014359 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-28 07:21:09,594 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-28 07:21:09,595 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-28 07:21:09,596 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-28 07:21:09,597 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,606 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-28 07:21:09,623 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,625 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,626 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,627 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,627 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,628 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,629 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,629 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,630 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,630 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,631 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,632 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,633 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,633 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-28 07:21:09,634 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,634 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,635 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,635 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,636 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,637 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,637 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,638 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,638 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 07:21:09,639 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,639 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-28 07:21:09,641 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 07:21:09,643 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-28 07:21:09,646 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-28 07:21:09,646 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-28 07:21:09,647 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-28 07:21:09,648 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-28 07:21:09,656 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-28 07:21:09,658 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-28 07:21:09,660 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-28 07:21:09,660 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-28 07:21:09,661 main DEBUG createAppenders(={Console}) 2024-11-28 07:21:09,661 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-28 07:21:09,662 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-28 07:21:09,662 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-28 07:21:09,662 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-28 07:21:09,663 main DEBUG OutputStream closed 2024-11-28 07:21:09,663 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-28 07:21:09,663 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-28 07:21:09,663 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-28 07:21:09,730 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-28 07:21:09,732 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-28 07:21:09,733 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-28 07:21:09,734 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-28 07:21:09,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-28 07:21:09,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-28 07:21:09,735 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-28 07:21:09,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-28 07:21:09,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-28 07:21:09,736 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-28 07:21:09,737 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-28 07:21:09,737 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-28 07:21:09,737 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-28 07:21:09,737 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-28 07:21:09,738 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-28 07:21:09,738 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-28 07:21:09,738 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-28 07:21:09,739 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-28 07:21:09,741 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-28 07:21:09,742 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-28 07:21:09,742 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-28 07:21:09,743 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-28T07:21:10,010 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae 2024-11-28 07:21:10,013 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-28 07:21:10,014 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-28T07:21:10,023 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-28T07:21:10,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-28T07:21:10,051 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3, deleteOnExit=true 2024-11-28T07:21:10,051 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-28T07:21:10,052 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/test.cache.data in system properties and HBase conf 2024-11-28T07:21:10,052 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T07:21:10,053 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.log.dir in system properties and HBase conf 2024-11-28T07:21:10,053 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T07:21:10,054 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T07:21:10,054 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-28T07:21:10,143 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-28T07:21:10,266 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-28T07:21:10,272 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T07:21:10,272 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T07:21:10,273 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T07:21:10,274 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T07:21:10,274 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T07:21:10,275 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T07:21:10,276 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T07:21:10,276 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T07:21:10,277 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T07:21:10,277 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/nfs.dump.dir in system properties and HBase conf 2024-11-28T07:21:10,278 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/java.io.tmpdir in system properties and HBase conf 2024-11-28T07:21:10,278 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T07:21:10,279 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-28T07:21:10,280 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T07:21:11,130 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-28T07:21:11,224 INFO [Time-limited test {}] log.Log(170): Logging initialized @2464ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-28T07:21:11,314 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T07:21:11,398 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T07:21:11,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T07:21:11,425 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T07:21:11,427 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T07:21:11,444 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T07:21:11,448 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.log.dir/,AVAILABLE} 2024-11-28T07:21:11,449 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T07:21:11,672 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/java.io.tmpdir/jetty-localhost-37017-hadoop-hdfs-3_4_1-tests_jar-_-any-7341232412679525222/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T07:21:11,682 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:37017} 2024-11-28T07:21:11,683 INFO [Time-limited test {}] server.Server(415): Started @2924ms 2024-11-28T07:21:12,092 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T07:21:12,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T07:21:12,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T07:21:12,100 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T07:21:12,100 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T07:21:12,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.log.dir/,AVAILABLE} 2024-11-28T07:21:12,101 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T07:21:12,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/java.io.tmpdir/jetty-localhost-36847-hadoop-hdfs-3_4_1-tests_jar-_-any-13732639513272258057/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T07:21:12,221 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:36847} 2024-11-28T07:21:12,221 INFO [Time-limited test {}] server.Server(415): Started @3462ms 2024-11-28T07:21:12,278 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T07:21:12,759 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/dfs/data/data2/current/BP-2139004249-172.17.0.2-1732778470874/current, will proceed with Du for space computation calculation, 2024-11-28T07:21:12,759 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/dfs/data/data1/current/BP-2139004249-172.17.0.2-1732778470874/current, will proceed with Du for space computation calculation, 2024-11-28T07:21:12,799 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T07:21:12,851 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae 2024-11-28T07:21:12,856 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9b6b63d09c36e70 with lease ID 0xec09916ad773ec55: Processing first storage report for DS-b83649e5-dde3-4f5d-89ad-9c228d59fe40 from datanode DatanodeRegistration(127.0.0.1:35055, datanodeUuid=7160b532-67ad-4edd-8dd7-ef7aec28b2f2, infoPort=33585, infoSecurePort=0, ipcPort=43389, storageInfo=lv=-57;cid=testClusterID;nsid=151870553;c=1732778470874) 2024-11-28T07:21:12,858 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9b6b63d09c36e70 with lease ID 0xec09916ad773ec55: from storage DS-b83649e5-dde3-4f5d-89ad-9c228d59fe40 node DatanodeRegistration(127.0.0.1:35055, datanodeUuid=7160b532-67ad-4edd-8dd7-ef7aec28b2f2, infoPort=33585, infoSecurePort=0, ipcPort=43389, storageInfo=lv=-57;cid=testClusterID;nsid=151870553;c=1732778470874), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T07:21:12,858 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9b6b63d09c36e70 with lease ID 0xec09916ad773ec55: Processing first storage report for DS-7f034a3c-f97b-4b13-92d3-712ab7e061bb from datanode DatanodeRegistration(127.0.0.1:35055, datanodeUuid=7160b532-67ad-4edd-8dd7-ef7aec28b2f2, infoPort=33585, infoSecurePort=0, ipcPort=43389, storageInfo=lv=-57;cid=testClusterID;nsid=151870553;c=1732778470874) 2024-11-28T07:21:12,859 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9b6b63d09c36e70 with lease ID 0xec09916ad773ec55: from storage DS-7f034a3c-f97b-4b13-92d3-712ab7e061bb node DatanodeRegistration(127.0.0.1:35055, datanodeUuid=7160b532-67ad-4edd-8dd7-ef7aec28b2f2, infoPort=33585, infoSecurePort=0, ipcPort=43389, storageInfo=lv=-57;cid=testClusterID;nsid=151870553;c=1732778470874), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 
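The records above cover the start of the mini cluster: HBaseTestingUtility pointing test.cache.data, hadoop.tmp.dir and friends at the test-data directory, a single-datanode mini DFS coming up behind Jetty, and the first block reports arriving. That sequence is what HBaseClassTestRule plus HBaseTestingUtility#startMiniCluster drive; a minimal sketch of such a harness, using the option values printed earlier (1 master, 1 region server, 1 data node) and a hypothetical class name:

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.apache.hadoop.hbase.testclassification.LargeTests;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.ClassRule;
    import org.junit.experimental.categories.Category;

    @Category(LargeTests.class)
    public class MiniClusterHarnessSketch {

      // Enforces the per-class timeout reported above ("timeout: 13 mins").
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MiniClusterHarnessSketch.class);

      protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUp() throws Exception {
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1} from the log.
        TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .build());
      }

      @AfterClass
      public static void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
      }
    }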
2024-11-28T07:21:12,928 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/zookeeper_0, clientPort=56318, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-28T07:21:12,938 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56318 2024-11-28T07:21:12,951 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:12,957 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741825_1001 (size=7) 2024-11-28T07:21:13,620 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e with version=8 2024-11-28T07:21:13,621 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/hbase-staging 2024-11-28T07:21:13,753 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-28T07:21:14,024 INFO [Time-limited test {}] client.ConnectionUtils(129): master/592d8b721726:0 server-side Connection retries=45 2024-11-28T07:21:14,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,046 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,047 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T07:21:14,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T07:21:14,181 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T07:21:14,241 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-28T07:21:14,251 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-28T07:21:14,254 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T07:21:14,282 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 21588 (auto-detected) 2024-11-28T07:21:14,283 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-28T07:21:14,302 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:41703 2024-11-28T07:21:14,310 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:14,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:14,332 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:41703 connecting to ZooKeeper ensemble=127.0.0.1:56318 2024-11-28T07:21:14,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417030x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T07:21:14,366 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41703-0x1003d00eeb50000 connected 2024-11-28T07:21:14,404 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T07:21:14,408 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T07:21:14,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T07:21:14,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41703 2024-11-28T07:21:14,415 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41703 2024-11-28T07:21:14,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41703 2024-11-28T07:21:14,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41703 2024-11-28T07:21:14,419 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41703 
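With the master RPC server bound to port 41703 and its znodes watched in ZooKeeper (quorum 127.0.0.1:56318), client-side test code normally reaches the cluster through the utility's Configuration rather than those raw endpoints. A minimal sketch, assuming the TEST_UTIL instance from the harness sketch above (class and method names here are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class MiniClusterClientSketch {
      // Opens a client connection using the configuration the mini cluster populated
      // (ZooKeeper quorum and client port, hbase.rootdir, ...).
      static void checkMasterIsUp(HBaseTestingUtility util) throws IOException {
        Configuration conf = util.getConfiguration();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }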
2024-11-28T07:21:14,426 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e, hbase.cluster.distributed=false 2024-11-28T07:21:14,489 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/592d8b721726:0 server-side Connection retries=45 2024-11-28T07:21:14,489 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,490 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T07:21:14,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T07:21:14,490 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T07:21:14,492 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T07:21:14,494 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T07:21:14,495 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33143 2024-11-28T07:21:14,497 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T07:21:14,502 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-28T07:21:14,504 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:14,507 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:14,510 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33143 connecting to ZooKeeper ensemble=127.0.0.1:56318 2024-11-28T07:21:14,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331430x0, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T07:21:14,514 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33143-0x1003d00eeb50001 connected 2024-11-28T07:21:14,514 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T07:21:14,516 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33143-0x1003d00eeb50001, 
quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T07:21:14,517 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T07:21:14,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33143 2024-11-28T07:21:14,518 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33143 2024-11-28T07:21:14,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33143 2024-11-28T07:21:14,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33143 2024-11-28T07:21:14,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33143 2024-11-28T07:21:14,523 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/592d8b721726,41703,1732778473746 2024-11-28T07:21:14,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T07:21:14,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T07:21:14,534 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/592d8b721726,41703,1732778473746 2024-11-28T07:21:14,541 DEBUG [M:0;592d8b721726:41703 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;592d8b721726:41703 2024-11-28T07:21:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T07:21:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T07:21:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:14,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:14,559 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T07:21:14,560 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T07:21:14,560 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/592d8b721726,41703,1732778473746 from backup master directory 2024-11-28T07:21:14,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/592d8b721726,41703,1732778473746 2024-11-28T07:21:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T07:21:14,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T07:21:14,563 WARN [master/592d8b721726:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T07:21:14,564 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=592d8b721726,41703,1732778473746 2024-11-28T07:21:14,566 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-28T07:21:14,567 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-28T07:21:14,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741826_1002 (size=42) 2024-11-28T07:21:15,039 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/hbase.id with ID: 4e41e5e1-ce7d-4afc-a3fb-c097a83fc949 2024-11-28T07:21:15,079 INFO [master/592d8b721726:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T07:21:15,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:15,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:15,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741827_1003 (size=196) 2024-11-28T07:21:15,543 INFO [master/592d8b721726:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:21:15,545 INFO [master/592d8b721726:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-28T07:21:15,563 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:15,568 INFO [master/592d8b721726:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T07:21:15,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741828_1004 (size=1189) 2024-11-28T07:21:15,621 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store 2024-11-28T07:21:15,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741829_1005 (size=34) 2024-11-28T07:21:15,644 INFO [master/592d8b721726:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
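The 'master:store' descriptor dumped above (an in-memory 'info' family with ROW_INDEX_V1 encoding, an 8 KB block size and a ROWCOL bloom filter, plus single-version 'proc', 'rs' and 'state' families) maps onto HBase's descriptor-builder API. A hedged illustration of building the 'info' family the same way; this is a sketch for reading the log, not the code MasterRegionFactory actually runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
      }
    }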
2024-11-28T07:21:15,645 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:15,646 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T07:21:15,647 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:21:15,647 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:21:15,647 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T07:21:15,647 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:21:15,647 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:21:15,648 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T07:21:15,650 WARN [master/592d8b721726:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/.initializing 2024-11-28T07:21:15,650 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/WALs/592d8b721726,41703,1732778473746 2024-11-28T07:21:15,658 INFO [master/592d8b721726:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T07:21:15,669 INFO [master/592d8b721726:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=592d8b721726%2C41703%2C1732778473746, suffix=, logDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/WALs/592d8b721726,41703,1732778473746, archiveDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/oldWALs, maxLogs=10 2024-11-28T07:21:15,693 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/WALs/592d8b721726,41703,1732778473746/592d8b721726%2C41703%2C1732778473746.1732778475674, exclude list is [], retry=0 2024-11-28T07:21:15,711 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35055,DS-b83649e5-dde3-4f5d-89ad-9c228d59fe40,DISK] 2024-11-28T07:21:15,714 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
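The asynchronous WAL pipeline being set up here (AsyncFSWALProvider writing through FanOutOneBlockAsyncDFSOutput, maxLogs=10) is selected by configuration. A sketch of the relevant settings, assuming the standard hbase.wal.provider and hbase.regionserver.maxlogs keys; values are taken from this log, not prescribed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class WalProviderConfigSketch {
      static Configuration asyncFsWalConf() {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects AsyncFSWALProvider, as instantiated above; "filesystem" would select the classic writer.
        conf.set("hbase.wal.provider", "asyncfs");
        // Matches maxLogs=10 reported in the WAL configuration record above.
        conf.setInt("hbase.regionserver.maxlogs", 10);
        return conf;
      }
    }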
2024-11-28T07:21:15,752 INFO [master/592d8b721726:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/WALs/592d8b721726,41703,1732778473746/592d8b721726%2C41703%2C1732778473746.1732778475674 2024-11-28T07:21:15,753 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33585:33585)] 2024-11-28T07:21:15,754 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:15,754 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:15,758 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,759 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-28T07:21:15,826 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:15,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:15,830 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,833 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-28T07:21:15,833 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:15,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:15,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,837 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-28T07:21:15,837 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:15,838 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:15,839 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-28T07:21:15,842 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:15,843 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:15,847 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,848 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,856 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-28T07:21:15,860 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T07:21:15,864 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:21:15,865 INFO [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63946311, jitterRate=-0.04712571203708649}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T07:21:15,869 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T07:21:15,870 INFO [master/592d8b721726:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-28T07:21:15,899 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e262798, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:15,934 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
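Two of the numbers in the entries above can be sanity-checked by hand. ConstantSizeRegionSplitPolicy prints desiredMaxFileSize=63946311 alongside jitterRate=-0.04712571..., which is consistent with a jittered 64 MB base (base times 1 + jitterRate). FlushLargeStoresPolicy reports falling back to the region memstore flush size divided by the number of column families, and 134217728 (the flushSize printed by MasterRegionFlusherAndCompactor) divided by the 4 families of master:store (info, proc, rs, state) gives exactly the flushSizeLowerBound=33554432 shown for the opened region. A minimal check, assuming only those two relationships:

public class StartupNumbersCheck {
    public static void main(String[] args) {
        // Implied base split size: desiredMaxFileSize = base * (1 + jitterRate).
        double jitterRate = -0.04712571203708649;
        long desiredMaxFileSize = 63946311L;
        double impliedBase = desiredMaxFileSize / (1 + jitterRate);
        System.out.printf("implied base split size ~= %.0f bytes (64 MB = %d)%n",
            impliedBase, 64L * 1024 * 1024);

        // Per-family flush lower bound: flushSize / number of families.
        long flushSize = 134217728L;   // from the MasterRegionFlusherAndCompactor entry
        int families = 4;              // info, proc, rs, state
        System.out.println("flushSizeLowerBound = " + (flushSize / families)); // 33554432
    }
}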
2024-11-28T07:21:15,945 INFO [master/592d8b721726:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-28T07:21:15,946 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-28T07:21:15,948 INFO [master/592d8b721726:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-28T07:21:15,949 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-28T07:21:15,954 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-28T07:21:15,954 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-28T07:21:15,980 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-28T07:21:15,992 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-28T07:21:15,997 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-28T07:21:16,000 INFO [master/592d8b721726:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-28T07:21:16,001 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-28T07:21:16,003 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-28T07:21:16,005 INFO [master/592d8b721726:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-28T07:21:16,009 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-28T07:21:16,010 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-28T07:21:16,011 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-28T07:21:16,013 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-28T07:21:16,023 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-28T07:21:16,024 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-28T07:21:16,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T07:21:16,028 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T07:21:16,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,029 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=592d8b721726,41703,1732778473746, sessionid=0x1003d00eeb50000, setting cluster-up flag (Was=false) 2024-11-28T07:21:16,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,047 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-28T07:21:16,049 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=592d8b721726,41703,1732778473746 2024-11-28T07:21:16,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,059 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-28T07:21:16,061 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=592d8b721726,41703,1732778473746 2024-11-28T07:21:16,141 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;592d8b721726:33143 2024-11-28T07:21:16,143 INFO 
[RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1008): ClusterId : 4e41e5e1-ce7d-4afc-a3fb-c097a83fc949 2024-11-28T07:21:16,147 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T07:21:16,152 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-28T07:21:16,153 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T07:21:16,154 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T07:21:16,157 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T07:21:16,158 DEBUG [RS:0;592d8b721726:33143 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18644e91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:16,158 INFO [master/592d8b721726:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-28T07:21:16,160 DEBUG [RS:0;592d8b721726:33143 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@743a34b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=592d8b721726/172.17.0.2:0 2024-11-28T07:21:16,161 INFO [master/592d8b721726:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-28T07:21:16,163 INFO [RS:0;592d8b721726:33143 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-28T07:21:16,163 INFO [RS:0;592d8b721726:33143 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-28T07:21:16,163 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-28T07:21:16,165 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(3073): reportForDuty to master=592d8b721726,41703,1732778473746 with isa=592d8b721726/172.17.0.2:33143, startcode=1732778474488 2024-11-28T07:21:16,167 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 592d8b721726,41703,1732778473746 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-28T07:21:16,170 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/592d8b721726:0, corePoolSize=5, maxPoolSize=5 2024-11-28T07:21:16,170 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/592d8b721726:0, corePoolSize=5, maxPoolSize=5 2024-11-28T07:21:16,170 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/592d8b721726:0, corePoolSize=5, maxPoolSize=5 2024-11-28T07:21:16,170 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/592d8b721726:0, corePoolSize=5, maxPoolSize=5 2024-11-28T07:21:16,170 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/592d8b721726:0, corePoolSize=10, maxPoolSize=10 2024-11-28T07:21:16,171 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,171 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/592d8b721726:0, corePoolSize=2, maxPoolSize=2 2024-11-28T07:21:16,171 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,175 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732778506175 2024-11-28T07:21:16,176 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-28T07:21:16,176 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-28T07:21:16,177 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-28T07:21:16,179 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-28T07:21:16,181 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,181 DEBUG [RS:0;592d8b721726:33143 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=RegionServerStatusService, sasl=false 2024-11-28T07:21:16,181 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T07:21:16,183 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-28T07:21:16,183 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-28T07:21:16,184 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-28T07:21:16,184 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-28T07:21:16,188 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
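The hbase:meta descriptor printed above spells out the per-family attributes in use (ROW_INDEX_V1 data-block encoding, ROWCOL Bloom filter, IN_MEMORY, 8 KB block size for 'info'). Below is a hedged sketch of how the same attributes would be expressed through the HBase 2.x client builder API, applied to a hypothetical user table named 'example' rather than to hbase:meta itself.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
    public static void main(String[] args) {
        // Column family mirroring the 'info' attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();
        // Hypothetical table name; hbase:meta itself is created by the master, not by clients.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
    }
}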
2024-11-28T07:21:16,189 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-28T07:21:16,191 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-28T07:21:16,191 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-28T07:21:16,198 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-28T07:21:16,198 INFO [master/592d8b721726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-28T07:21:16,200 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/592d8b721726:0:becomeActiveMaster-HFileCleaner.large.0-1732778476199,5,FailOnTimeoutGroup] 2024-11-28T07:21:16,202 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/592d8b721726:0:becomeActiveMaster-HFileCleaner.small.0-1732778476200,5,FailOnTimeoutGroup] 2024-11-28T07:21:16,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741831_1007 (size=1039) 2024-11-28T07:21:16,202 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,202 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-28T07:21:16,203 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,203 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
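Several of the entries above schedule ScheduledChore instances (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) on the master's ChoreService. A minimal sketch of that mechanism follows; the constructor and method shapes reflect the HBase 2.x public API as best recalled and should be treated as assumptions, and the chore body is a placeholder rather than the real cleaner logic.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChore {
    public static void main(String[] args) throws InterruptedException {
        // Simple stop flag; the real services pass the server itself as the Stoppable.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        // Period mirrors the 600000 ms used by the cleaner chores above (assumed signature).
        ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
            @Override protected void chore() {
                // Periodic work goes here; the real cleaners scan old WALs/HFiles.
                System.out.println("chore tick");
            }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        Thread.sleep(1_000);
        service.shutdown();
    }
}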
2024-11-28T07:21:16,206 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-28T07:21:16,206 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:21:16,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741832_1008 (size=32) 2024-11-28T07:21:16,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:16,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T07:21:16,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T07:21:16,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,225 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49637, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T07:21:16,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:16,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T07:21:16,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T07:21:16,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:16,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T07:21:16,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T07:21:16,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,232 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41703 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 592d8b721726,33143,1732778474488 2024-11-28T07:21:16,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:16,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740 2024-11-28T07:21:16,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740 2024-11-28T07:21:16,235 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41703 {}] master.ServerManager(486): Registering regionserver=592d8b721726,33143,1732778474488 2024-11-28T07:21:16,238 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:21:16,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-28T07:21:16,248 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:21:16,250 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73630234, jitterRate=0.09717598557472229}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:21:16,251 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:21:16,252 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44329 2024-11-28T07:21:16,252 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-28T07:21:16,253 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-28T07:21:16,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-28T07:21:16,254 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-28T07:21:16,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-28T07:21:16,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T07:21:16,254 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T07:21:16,256 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-28T07:21:16,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-28T07:21:16,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T07:21:16,257 DEBUG [RS:0;592d8b721726:33143 {}] zookeeper.ZKUtil(111): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/592d8b721726,33143,1732778474488 2024-11-28T07:21:16,257 WARN [RS:0;592d8b721726:33143 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T07:21:16,257 INFO [RS:0;592d8b721726:33143 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T07:21:16,257 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488 2024-11-28T07:21:16,259 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-28T07:21:16,259 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-28T07:21:16,259 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [592d8b721726,33143,1732778474488] 2024-11-28T07:21:16,266 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-28T07:21:16,270 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-28T07:21:16,276 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T07:21:16,279 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-28T07:21:16,282 INFO [RS:0;592d8b721726:33143 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T07:21:16,294 INFO [RS:0;592d8b721726:33143 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T07:21:16,296 INFO [RS:0;592d8b721726:33143 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T07:21:16,296 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,297 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-28T07:21:16,304 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
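The watcher entries above track ephemeral znodes under baseZNode=/hbase, and RegionServerTracker picks up the new child of /hbase/rs when the region server registers. The small sketch below lists those region-server znodes directly with the plain ZooKeeper client; the quorum address is a placeholder, while the /hbase/rs path matches what the log prints.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZnodes {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; the test cluster above uses 127.0.0.1:56318.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, new Watcher() {
            @Override public void process(WatchedEvent event) { /* ignore events */ }
        });
        try {
            // Each live region server registers an ephemeral child under /hbase/rs.
            List<String> servers = zk.getChildren("/hbase/rs", false);
            System.out.println("live region servers: " + servers);
        } finally {
            zk.close();
        }
    }
}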
2024-11-28T07:21:16,304 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,304 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,304 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/592d8b721726:0, corePoolSize=2, maxPoolSize=2 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,305 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,306 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/592d8b721726:0, corePoolSize=1, maxPoolSize=1 2024-11-28T07:21:16,306 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/592d8b721726:0, corePoolSize=3, maxPoolSize=3 2024-11-28T07:21:16,306 DEBUG [RS:0;592d8b721726:33143 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0, corePoolSize=3, maxPoolSize=3 2024-11-28T07:21:16,306 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,307 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,307 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,307 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,307 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,33143,1732778474488-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-28T07:21:16,327 INFO [RS:0;592d8b721726:33143 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T07:21:16,328 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,33143,1732778474488-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:16,351 INFO [RS:0;592d8b721726:33143 {}] regionserver.Replication(204): 592d8b721726,33143,1732778474488 started 2024-11-28T07:21:16,351 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1767): Serving as 592d8b721726,33143,1732778474488, RpcServer on 592d8b721726/172.17.0.2:33143, sessionid=0x1003d00eeb50001 2024-11-28T07:21:16,352 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T07:21:16,352 DEBUG [RS:0;592d8b721726:33143 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 592d8b721726,33143,1732778474488 2024-11-28T07:21:16,352 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '592d8b721726,33143,1732778474488' 2024-11-28T07:21:16,352 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T07:21:16,353 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T07:21:16,354 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T07:21:16,354 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T07:21:16,354 DEBUG [RS:0;592d8b721726:33143 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 592d8b721726,33143,1732778474488 2024-11-28T07:21:16,354 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '592d8b721726,33143,1732778474488' 2024-11-28T07:21:16,354 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T07:21:16,355 DEBUG [RS:0;592d8b721726:33143 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T07:21:16,356 DEBUG [RS:0;592d8b721726:33143 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T07:21:16,356 INFO [RS:0;592d8b721726:33143 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T07:21:16,356 INFO [RS:0;592d8b721726:33143 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-28T07:21:16,430 WARN [592d8b721726:41703 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-11-28T07:21:16,462 INFO [RS:0;592d8b721726:33143 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T07:21:16,465 INFO [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=592d8b721726%2C33143%2C1732778474488, suffix=, logDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488, archiveDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/oldWALs, maxLogs=32 2024-11-28T07:21:16,482 DEBUG [RS:0;592d8b721726:33143 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488/592d8b721726%2C33143%2C1732778474488.1732778476468, exclude list is [], retry=0 2024-11-28T07:21:16,487 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35055,DS-b83649e5-dde3-4f5d-89ad-9c228d59fe40,DISK] 2024-11-28T07:21:16,491 INFO [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488/592d8b721726%2C33143%2C1732778474488.1732778476468 2024-11-28T07:21:16,491 DEBUG [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33585:33585)] 2024-11-28T07:21:16,682 DEBUG [592d8b721726:41703 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-28T07:21:16,687 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:16,692 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 592d8b721726,33143,1732778474488, state=OPENING 2024-11-28T07:21:16,698 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-28T07:21:16,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:16,700 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T07:21:16,700 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T07:21:16,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:16,876 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:16,878 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-11-28T07:21:16,881 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T07:21:16,892 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-28T07:21:16,892 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T07:21:16,893 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-28T07:21:16,896 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=592d8b721726%2C33143%2C1732778474488.meta, suffix=.meta, logDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488, archiveDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/oldWALs, maxLogs=32 2024-11-28T07:21:16,912 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488/592d8b721726%2C33143%2C1732778474488.meta.1732778476898.meta, exclude list is [], retry=0 2024-11-28T07:21:16,916 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35055,DS-b83649e5-dde3-4f5d-89ad-9c228d59fe40,DISK] 2024-11-28T07:21:16,919 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/WALs/592d8b721726,33143,1732778474488/592d8b721726%2C33143%2C1732778474488.meta.1732778476898.meta 2024-11-28T07:21:16,920 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33585:33585)] 2024-11-28T07:21:16,920 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:16,921 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-28T07:21:16,980 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-28T07:21:16,985 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
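Once the OpenRegionProcedure above completes, the master publishes the hbase:meta location to the /hbase/meta-region-server znode (the state=OPEN update appears a little further down). The sketch below shows how a client would read that location through the standard connection API rather than ZooKeeper directly; connection settings come from whatever hbase-site.xml is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class FindMetaLocation {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
            // hbase:meta is a single region, so the empty start row resolves its location.
            HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
            System.out.println("hbase:meta is on " + loc.getServerName());
        }
    }
}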
2024-11-28T07:21:16,989 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-28T07:21:16,990 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:16,990 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-28T07:21:16,990 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-28T07:21:16,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T07:21:16,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T07:21:16,995 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:16,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T07:21:16,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T07:21:16,998 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:16,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:16,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T07:21:17,000 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T07:21:17,000 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:17,001 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T07:21:17,002 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740 2024-11-28T07:21:17,004 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740 2024-11-28T07:21:17,007 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-28T07:21:17,009 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-28T07:21:17,011 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70224497, jitterRate=0.046426549553871155}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:21:17,012 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-28T07:21:17,020 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732778476870 2024-11-28T07:21:17,030 DEBUG [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-28T07:21:17,031 INFO [RS_OPEN_META-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-28T07:21:17,032 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:17,034 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 592d8b721726,33143,1732778474488, state=OPEN 2024-11-28T07:21:17,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T07:21:17,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T07:21:17,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T07:21:17,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T07:21:17,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-28T07:21:17,042 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=592d8b721726,33143,1732778474488 in 336 msec 2024-11-28T07:21:17,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-28T07:21:17,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 777 msec 2024-11-28T07:21:17,053 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 946 msec 2024-11-28T07:21:17,054 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732778477053, 
completionTime=-1 2024-11-28T07:21:17,054 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-28T07:21:17,054 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-28T07:21:17,092 DEBUG [hconnection-0x10bb86e4-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:17,095 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:17,105 INFO [master/592d8b721726:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-28T07:21:17,105 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732778537105 2024-11-28T07:21:17,105 INFO [master/592d8b721726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732778597105 2024-11-28T07:21:17,105 INFO [master/592d8b721726:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 50 msec 2024-11-28T07:21:17,128 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:17,128 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:17,129 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:17,130 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-592d8b721726:41703, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:17,131 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-28T07:21:17,136 DEBUG [master/592d8b721726:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-28T07:21:17,140 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
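The ChoreService entries above show the master enabling its periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore). ScheduledChore and ChoreService are HBase-internal classes, so the sketch below is only an illustration of the pattern those log lines describe, not client-facing API; the chore name, period and stopper are all made up:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {       // trivial stopper for the demo
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");   // hypothetical thread-name prefix
        // Periodic task, analogous to the "period=60000, unit=MILLISECONDS is enabled" entries.
        service.scheduleChore(new ScheduledChore("DemoChore", stopper, 60, 0, TimeUnit.SECONDS) {
          @Override protected void chore() {
            // periodic work would go here
          }
        });
        TimeUnit.SECONDS.sleep(5);
        service.shutdown();
      }
    }
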
2024-11-28T07:21:17,141 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T07:21:17,147 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-28T07:21:17,151 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:21:17,153 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:17,155 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:21:17,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741835_1011 (size=358) 2024-11-28T07:21:17,574 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 29128ed80b74de0f148960cd93ceedac, NAME => 'hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:21:17,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741836_1012 (size=42) 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 29128ed80b74de0f148960cd93ceedac, disabling compactions & flushes 2024-11-28T07:21:17,585 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 
after waiting 0 ms 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,585 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,585 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 29128ed80b74de0f148960cd93ceedac: 2024-11-28T07:21:17,588 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:21:17,595 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732778477589"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778477589"}]},"ts":"1732778477589"} 2024-11-28T07:21:17,622 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:21:17,625 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:21:17,627 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778477625"}]},"ts":"1732778477625"} 2024-11-28T07:21:17,632 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-28T07:21:17,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=29128ed80b74de0f148960cd93ceedac, ASSIGN}] 2024-11-28T07:21:17,640 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=29128ed80b74de0f148960cd93ceedac, ASSIGN 2024-11-28T07:21:17,642 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=29128ed80b74de0f148960cd93ceedac, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:21:17,793 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=29128ed80b74de0f148960cd93ceedac, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:17,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 29128ed80b74de0f148960cd93ceedac, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:17,952 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:17,959 INFO [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 29128ed80b74de0f148960cd93ceedac, NAME => 'hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:17,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:17,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,965 INFO [StoreOpener-29128ed80b74de0f148960cd93ceedac-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,968 INFO [StoreOpener-29128ed80b74de0f148960cd93ceedac-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29128ed80b74de0f148960cd93ceedac columnFamilyName info 2024-11-28T07:21:17,968 DEBUG [StoreOpener-29128ed80b74de0f148960cd93ceedac-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:17,969 INFO [StoreOpener-29128ed80b74de0f148960cd93ceedac-1 {}] regionserver.HStore(327): Store=29128ed80b74de0f148960cd93ceedac/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:17,970 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,971 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,974 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:21:17,977 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:21:17,978 INFO [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 29128ed80b74de0f148960cd93ceedac; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59834720, jitterRate=-0.10839319229125977}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T07:21:17,980 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 29128ed80b74de0f148960cd93ceedac: 2024-11-28T07:21:17,983 INFO [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac., pid=6, masterSystemTime=1732778477952 2024-11-28T07:21:17,987 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:21:17,987 INFO [RS_OPEN_PRIORITY_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 
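Once the namespace region is opened and its location published (the "Post open deploy tasks" and RegionStateStore entries above), a client can look the assignment up through the standard RegionLocator API. A small sketch, assuming an hbase-site.xml on the classpath that points at this cluster:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class NamespaceRegionLocationSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:namespace"))) {
          // Prints e.g. "29128ed80b74de0f148960cd93ceedac -> 592d8b721726,33143,1732778474488"
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
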
2024-11-28T07:21:17,988 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=29128ed80b74de0f148960cd93ceedac, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:17,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-28T07:21:17,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 29128ed80b74de0f148960cd93ceedac, server=592d8b721726,33143,1732778474488 in 194 msec 2024-11-28T07:21:17,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-28T07:21:17,999 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=29128ed80b74de0f148960cd93ceedac, ASSIGN in 357 msec 2024-11-28T07:21:18,000 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:21:18,000 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778478000"}]},"ts":"1732778478000"} 2024-11-28T07:21:18,003 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-28T07:21:18,007 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:21:18,009 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 865 msec 2024-11-28T07:21:18,051 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-28T07:21:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-28T07:21:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:18,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:21:18,083 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-28T07:21:18,098 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-28T07:21:18,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-11-28T07:21:18,107 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-28T07:21:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-28T07:21:18,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-28T07:21:18,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-28T07:21:18,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-28T07:21:18,136 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.572sec 2024-11-28T07:21:18,138 INFO [master/592d8b721726:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-28T07:21:18,139 INFO [master/592d8b721726:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-28T07:21:18,141 INFO [master/592d8b721726:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-28T07:21:18,141 INFO [master/592d8b721726:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-28T07:21:18,141 INFO [master/592d8b721726:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-28T07:21:18,143 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T07:21:18,143 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-28T07:21:18,151 DEBUG [master/592d8b721726:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-28T07:21:18,152 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-28T07:21:18,152 INFO [master/592d8b721726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=592d8b721726,41703,1732778473746-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
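The two CreateNamespaceProcedure entries above ('default' and 'hbase') are the built-in namespaces the master creates during initialization. User namespaces go through the same procedure via the Admin API; a minimal sketch, where the namespace name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Runs a CreateNamespaceProcedure on the master, like pid=7/pid=8 above.
          admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
        }
      }
    }
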
2024-11-28T07:21:18,242 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-28T07:21:18,242 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-28T07:21:18,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:18,256 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-28T07:21:18,256 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-28T07:21:18,270 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:18,281 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:18,290 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=592d8b721726,41703,1732778473746 2024-11-28T07:21:18,304 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=251, ProcessCount=11, AvailableMemoryMB=5569 2024-11-28T07:21:18,315 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:21:18,318 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:21:18,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
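The WARN above flags that this client still bootstraps through the deprecated ZKConnectionRegistry, pointing at the book's client.rpcconnectionregistry section for the RPC-based replacement. A hedged sketch of the switch: hbase.client.registry.impl and hbase.client.bootstrap.servers are the property names I believe that section documents, and the endpoint below is hypothetical, so verify both against the HBase version in use:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistrySketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys -- check the client.rpcconnectionregistry book section for your release.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "master-host:16000"); // hypothetical endpoint
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected via " + conn.getClass().getSimpleName());
        }
      }
    }
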
2024-11-28T07:21:18,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:21:18,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:18,357 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:21:18,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-28T07:21:18,358 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:18,360 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:21:18,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741837_1013 (size=960) 2024-11-28T07:21:18,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:18,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:18,788 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:21:18,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741838_1014 (size=53) 2024-11-28T07:21:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:19,199 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:19,199 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing af0c88dc7f2cd28f9a7271a3bc766683, disabling compactions & flushes 2024-11-28T07:21:19,199 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:19,199 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:19,199 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. after waiting 0 ms 2024-11-28T07:21:19,199 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:19,200 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
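The create 'TestAcidGuarantees' request above carries three identical column families (A, B, C) plus the table attribute hbase.hregion.compacting.memstore.type => 'BASIC', and it trips the MEMSTORE_FLUSHSIZE warning because the test deliberately uses a tiny flush size. A sketch of building an equivalent descriptor through the client API, assuming default connection config; only a few of the family options from the log are reproduced, and the flush size shown is just a non-warning placeholder:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table attribute quoted in the create request above.
                  .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                  // Comfortably above the 131072 bytes the TableDescriptorChecker warned about.
                  .setMemStoreFlushSize(64L * 1024 * 1024);
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)         // VERSIONS => '1'
                    .setBlocksize(64 * 1024)   // BLOCKSIZE => 65536
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }
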
2024-11-28T07:21:19,200 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:19,202 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:21:19,202 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778479202"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778479202"}]},"ts":"1732778479202"} 2024-11-28T07:21:19,205 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:21:19,206 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:21:19,207 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778479207"}]},"ts":"1732778479207"} 2024-11-28T07:21:19,209 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:21:19,213 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, ASSIGN}] 2024-11-28T07:21:19,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, ASSIGN 2024-11-28T07:21:19,217 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:21:19,367 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=af0c88dc7f2cd28f9a7271a3bc766683, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:19,371 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:19,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:19,525 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:19,531 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:19,531 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:19,532 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,532 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:19,532 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,532 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,534 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,537 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:19,538 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af0c88dc7f2cd28f9a7271a3bc766683 columnFamilyName A 2024-11-28T07:21:19,538 DEBUG [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:19,539 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(327): Store=af0c88dc7f2cd28f9a7271a3bc766683/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:19,539 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,541 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:19,541 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af0c88dc7f2cd28f9a7271a3bc766683 columnFamilyName B 2024-11-28T07:21:19,541 DEBUG [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:19,542 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(327): Store=af0c88dc7f2cd28f9a7271a3bc766683/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:19,542 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,544 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:19,544 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af0c88dc7f2cd28f9a7271a3bc766683 columnFamilyName C 2024-11-28T07:21:19,544 DEBUG [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:19,545 INFO [StoreOpener-af0c88dc7f2cd28f9a7271a3bc766683-1 {}] regionserver.HStore(327): Store=af0c88dc7f2cd28f9a7271a3bc766683/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:19,546 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:19,547 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,547 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,550 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:21:19,553 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:19,556 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:21:19,557 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened af0c88dc7f2cd28f9a7271a3bc766683; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67081169, jitterRate=-4.1268765926361084E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:21:19,558 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:19,559 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., pid=11, masterSystemTime=1732778479525 2024-11-28T07:21:19,562 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:19,562 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
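With stores A, B and C opened on CompactingMemStore (the Store=.../A, /B, /C entries above), the test drives writes that span all three families of one row. A sketch of the kind of client call involved, assuming default connection config; the row key, qualifier and value below are made up:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiFamilyPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] row = Bytes.toBytes("test_row");   // hypothetical row key
          Put put = new Put(row);
          for (String family : new String[] { "A", "B", "C" }) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col0"), Bytes.toBytes("v1"));
          }
          table.put(put);   // one Put spanning the three families is applied as a single row mutation
          Result result = table.get(new Get(row));
          System.out.println(Bytes.toString(
              result.getValue(Bytes.toBytes("A"), Bytes.toBytes("col0"))));
        }
      }
    }
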
2024-11-28T07:21:19,563 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=af0c88dc7f2cd28f9a7271a3bc766683, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:19,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-28T07:21:19,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 in 195 msec 2024-11-28T07:21:19,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-28T07:21:19,573 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, ASSIGN in 357 msec 2024-11-28T07:21:19,575 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:21:19,575 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778479575"}]},"ts":"1732778479575"} 2024-11-28T07:21:19,578 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:21:19,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:21:19,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2310 sec 2024-11-28T07:21:20,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T07:21:20,484 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-28T07:21:20,489 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-28T07:21:20,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,495 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,497 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,501 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:21:20,505 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56382, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:21:20,513 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-28T07:21:20,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-28T07:21:20,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-28T07:21:20,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-28T07:21:20,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,532 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-28T07:21:20,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,538 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-28T07:21:20,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-28T07:21:20,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,549 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-28T07:21:20,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-28T07:21:20,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:20,562 DEBUG [hconnection-0x21675e13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,563 DEBUG [hconnection-0x176fe140-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,563 DEBUG [hconnection-0x426356f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,564 DEBUG [hconnection-0x2697246c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,564 DEBUG [hconnection-0x3bca7345-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,567 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,569 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:20,569 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50340, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,570 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50356, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,571 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,576 DEBUG [hconnection-0x4b800cbf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,577 DEBUG [hconnection-0x778c0741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-28T07:21:20,579 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,581 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:20,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:20,584 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:20,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:20,596 DEBUG [hconnection-0x5f7a19bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,597 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:20,616 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,619 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,619 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:20,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:21:20,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:20,685 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:20,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:20,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:20,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:20,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:20,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:20,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:20,748 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:20,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:20,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:20,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:20,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:20,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:20,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:20,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1c2ca8a251d04aebb9ad00c4c30d82d2 is 50, key is test_row_0/A:col10/1732778480673/Put/seqid=0 2024-11-28T07:21:20,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778540813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778540822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778540829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778540830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778540833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741839_1015 (size=14341) 2024-11-28T07:21:20,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1c2ca8a251d04aebb9ad00c4c30d82d2 2024-11-28T07:21:20,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:20,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:20,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:20,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:20,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:20,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:20,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:20,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:20,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778540963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778540965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778540964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778540965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:20,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:20,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778540966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,021 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/6957ecb2f7764a6890938abd3acc9614 is 50, key is test_row_0/B:col10/1732778480673/Put/seqid=0 2024-11-28T07:21:21,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741840_1016 (size=12001) 2024-11-28T07:21:21,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/6957ecb2f7764a6890938abd3acc9614 2024-11-28T07:21:21,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/a6a0316196634f89a7708516218e5626 is 50, key is test_row_0/C:col10/1732778480673/Put/seqid=0 2024-11-28T07:21:21,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:21,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:21,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741841_1017 (size=12001) 2024-11-28T07:21:21,129 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/a6a0316196634f89a7708516218e5626 2024-11-28T07:21:21,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1c2ca8a251d04aebb9ad00c4c30d82d2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2 2024-11-28T07:21:21,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2, entries=200, sequenceid=14, filesize=14.0 K 2024-11-28T07:21:21,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/6957ecb2f7764a6890938abd3acc9614 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614 2024-11-28T07:21:21,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614, entries=150, sequenceid=14, filesize=11.7 K 2024-11-28T07:21:21,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/a6a0316196634f89a7708516218e5626 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626 2024-11-28T07:21:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:21,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778541171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778541172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626, entries=150, sequenceid=14, filesize=11.7 K 2024-11-28T07:21:21,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778541174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for af0c88dc7f2cd28f9a7271a3bc766683 in 535ms, sequenceid=14, compaction requested=false 2024-11-28T07:21:21,209 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-28T07:21:21,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:21,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:21,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:21,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-28T07:21:21,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/7177d32ac69649c79d5867bb47d4b4cd is 50, key is test_row_0/A:col10/1732778480826/Put/seqid=0 2024-11-28T07:21:21,276 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778541257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778541284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741842_1018 (size=16681) 2024-11-28T07:21:21,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778541389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778541391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,431 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778541506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778541507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778541512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:21,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:21,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778541596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778541598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:21,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/7177d32ac69649c79d5867bb47d4b4cd 2024-11-28T07:21:21,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/39ca18fc99324d5b97047eaa5ea500ae is 50, key is test_row_0/B:col10/1732778480826/Put/seqid=0 2024-11-28T07:21:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741843_1019 (size=12001) 2024-11-28T07:21:21,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/39ca18fc99324d5b97047eaa5ea500ae 2024-11-28T07:21:21,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:21,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d34df5b7ce1e43bd945df01bb4239692 is 50, key is test_row_0/C:col10/1732778480826/Put/seqid=0 2024-11-28T07:21:21,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741844_1020 (size=12001) 2024-11-28T07:21:21,900 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:21,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:21,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:21,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:21,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:21,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:21,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778541905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:21,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:21,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778541910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778542018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778542019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778542031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,056 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:22,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:22,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:22,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,212 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:22,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:22,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:22,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d34df5b7ce1e43bd945df01bb4239692 2024-11-28T07:21:22,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/7177d32ac69649c79d5867bb47d4b4cd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd 2024-11-28T07:21:22,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd, entries=250, sequenceid=40, filesize=16.3 K 2024-11-28T07:21:22,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/39ca18fc99324d5b97047eaa5ea500ae as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae 2024-11-28T07:21:22,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae, entries=150, sequenceid=40, 
filesize=11.7 K 2024-11-28T07:21:22,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d34df5b7ce1e43bd945df01bb4239692 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692 2024-11-28T07:21:22,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692, entries=150, sequenceid=40, filesize=11.7 K 2024-11-28T07:21:22,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for af0c88dc7f2cd28f9a7271a3bc766683 in 1083ms, sequenceid=40, compaction requested=false 2024-11-28T07:21:22,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:22,367 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:22,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T07:21:22,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:22,369 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T07:21:22,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:22,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/5c04daf35e42411bbcaf6c6691eb7e75 is 50, key is test_row_0/A:col10/1732778481278/Put/seqid=0 2024-11-28T07:21:22,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:22,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
as already flushing 2024-11-28T07:21:22,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741845_1021 (size=12001) 2024-11-28T07:21:22,430 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/5c04daf35e42411bbcaf6c6691eb7e75 2024-11-28T07:21:22,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/85103c5dfe0647ac94239a4b144fc831 is 50, key is test_row_0/B:col10/1732778481278/Put/seqid=0 2024-11-28T07:21:22,498 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:21:22,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741846_1022 (size=12001) 2024-11-28T07:21:22,505 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/85103c5dfe0647ac94239a4b144fc831 2024-11-28T07:21:22,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ed95a9a850e54a509414dee1a0f70766 is 50, key is test_row_0/C:col10/1732778481278/Put/seqid=0 2024-11-28T07:21:22,572 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-28T07:21:22,574 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-28T07:21:22,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778542565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778542570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741847_1023 (size=12001) 2024-11-28T07:21:22,586 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ed95a9a850e54a509414dee1a0f70766 2024-11-28T07:21:22,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/5c04daf35e42411bbcaf6c6691eb7e75 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75 2024-11-28T07:21:22,614 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75, entries=150, sequenceid=50, filesize=11.7 K 2024-11-28T07:21:22,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/85103c5dfe0647ac94239a4b144fc831 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831 2024-11-28T07:21:22,629 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831, entries=150, sequenceid=50, filesize=11.7 K 2024-11-28T07:21:22,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ed95a9a850e54a509414dee1a0f70766 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766 2024-11-28T07:21:22,643 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766, entries=150, sequenceid=50, filesize=11.7 K 2024-11-28T07:21:22,645 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for af0c88dc7f2cd28f9a7271a3bc766683 in 276ms, sequenceid=50, compaction requested=true 2024-11-28T07:21:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-28T07:21:22,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-28T07:21:22,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-28T07:21:22,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0620 sec 2024-11-28T07:21:22,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.0820 sec 2024-11-28T07:21:22,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:22,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T07:21:22,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:22,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:22,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,688 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:22,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T07:21:22,693 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-28T07:21:22,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:22,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-28T07:21:22,700 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:22,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-28T07:21:22,701 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:22,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:22,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1a9fa977ccaa473bb64a8fe9ed181c8d is 50, key is test_row_0/A:col10/1732778482684/Put/seqid=0 2024-11-28T07:21:22,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778542712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741848_1024 (size=16681) 2024-11-28T07:21:22,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778542721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1a9fa977ccaa473bb64a8fe9ed181c8d 2024-11-28T07:21:22,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8e907a8ee1b64cad9f152be72cdac58e is 50, key is test_row_0/B:col10/1732778482684/Put/seqid=0 2024-11-28T07:21:22,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-28T07:21:22,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741849_1025 (size=12001) 2024-11-28T07:21:22,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8e907a8ee1b64cad9f152be72cdac58e 2024-11-28T07:21:22,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778542824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:22,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/081678340c0e45cba54ab58540e5097f is 50, key is test_row_0/C:col10/1732778482684/Put/seqid=0 2024-11-28T07:21:22,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778542830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:22,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:22,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-28T07:21:22,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:22,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:22,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:22,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741850_1026 (size=12001) 2024-11-28T07:21:22,863 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/081678340c0e45cba54ab58540e5097f 2024-11-28T07:21:22,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1a9fa977ccaa473bb64a8fe9ed181c8d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d 2024-11-28T07:21:22,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d, entries=250, sequenceid=78, filesize=16.3 K 2024-11-28T07:21:22,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8e907a8ee1b64cad9f152be72cdac58e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e 2024-11-28T07:21:22,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:21:22,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/081678340c0e45cba54ab58540e5097f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f 2024-11-28T07:21:22,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:21:22,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for af0c88dc7f2cd28f9a7271a3bc766683 in 231ms, sequenceid=78, compaction requested=true 2024-11-28T07:21:22,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:22,920 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:22,924 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:22,926 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:22,927 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:22,927 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=46.9 K 2024-11-28T07:21:22,928 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 6957ecb2f7764a6890938abd3acc9614, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778480649 2024-11-28T07:21:22,929 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 39ca18fc99324d5b97047eaa5ea500ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732778480826 2024-11-28T07:21:22,930 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 85103c5dfe0647ac94239a4b144fc831, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732778481241 2024-11-28T07:21:22,931 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e907a8ee1b64cad9f152be72cdac58e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778482566 2024-11-28T07:21:22,933 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:22,936 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59704 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:22,936 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:22,936 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:22,936 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=58.3 K 2024-11-28T07:21:22,937 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c2ca8a251d04aebb9ad00c4c30d82d2, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778480642 2024-11-28T07:21:22,938 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7177d32ac69649c79d5867bb47d4b4cd, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732778480794 2024-11-28T07:21:22,947 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c04daf35e42411bbcaf6c6691eb7e75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732778481241 2024-11-28T07:21:22,956 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a9fa977ccaa473bb64a8fe9ed181c8d, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778482537 2024-11-28T07:21:22,993 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#12 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:22,994 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/79c1ff1f2e004bfcb00faa097ac869c0 is 50, key is test_row_0/B:col10/1732778482684/Put/seqid=0 2024-11-28T07:21:23,002 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#13 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:23,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-28T07:21:23,003 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/6054179a711e499fbf2aeef79121ad1f is 50, key is test_row_0/A:col10/1732778482684/Put/seqid=0 2024-11-28T07:21:23,009 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:23,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-28T07:21:23,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:23,010 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:23,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741851_1027 (size=12139) 2024-11-28T07:21:23,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:23,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
as already flushing 2024-11-28T07:21:23,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741852_1028 (size=12139) 2024-11-28T07:21:23,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ecb0b94a509a46b2ba7b82545a1adfc5 is 50, key is test_row_0/A:col10/1732778482711/Put/seqid=0 2024-11-28T07:21:23,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741853_1029 (size=9657) 2024-11-28T07:21:23,088 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ecb0b94a509a46b2ba7b82545a1adfc5 2024-11-28T07:21:23,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/30c16a32eda648e3b512756cba548543 is 50, key is test_row_0/B:col10/1732778482711/Put/seqid=0 2024-11-28T07:21:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741854_1030 (size=9657) 2024-11-28T07:21:23,132 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/30c16a32eda648e3b512756cba548543 2024-11-28T07:21:23,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/580ed7a2b6ca4444aaf2a1d58b792b24 is 50, key is test_row_0/C:col10/1732778482711/Put/seqid=0 2024-11-28T07:21:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741855_1031 (size=9657) 2024-11-28T07:21:23,161 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/580ed7a2b6ca4444aaf2a1d58b792b24 2024-11-28T07:21:23,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ecb0b94a509a46b2ba7b82545a1adfc5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5 2024-11-28T07:21:23,181 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5, entries=100, sequenceid=86, filesize=9.4 K 2024-11-28T07:21:23,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/30c16a32eda648e3b512756cba548543 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543 2024-11-28T07:21:23,197 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543, entries=100, sequenceid=86, filesize=9.4 K 2024-11-28T07:21:23,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/580ed7a2b6ca4444aaf2a1d58b792b24 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24 2024-11-28T07:21:23,213 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24, entries=100, sequenceid=86, filesize=9.4 K 2024-11-28T07:21:23,220 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=134.18 KB/137400 for af0c88dc7f2cd28f9a7271a3bc766683 in 210ms, sequenceid=86, compaction requested=true 2024-11-28T07:21:23,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:23,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-28T07:21:23,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-28T07:21:23,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:23,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:21:23,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:23,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:23,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:23,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,227 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-28T07:21:23,227 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 522 msec 2024-11-28T07:21:23,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 534 msec 2024-11-28T07:21:23,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/de266d2c5bdc4624a875bb7f1418dbd5 is 50, key is test_row_0/A:col10/1732778483187/Put/seqid=0 2024-11-28T07:21:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741856_1032 (size=14341) 2024-11-28T07:21:23,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778543257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/de266d2c5bdc4624a875bb7f1418dbd5 2024-11-28T07:21:23,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778543261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778543262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778543264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778543270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/743691d392254245a5acd997b7b8fd52 is 50, key is test_row_0/B:col10/1732778483187/Put/seqid=0 2024-11-28T07:21:23,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-28T07:21:23,305 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-28T07:21:23,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:23,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-28T07:21:23,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:23,311 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:23,321 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:23,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:23,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741857_1033 (size=12001) 2024-11-28T07:21:23,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/743691d392254245a5acd997b7b8fd52 
2024-11-28T07:21:23,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/60ed42845dfc4f32b83f83646e682330 is 50, key is test_row_0/C:col10/1732778483187/Put/seqid=0 2024-11-28T07:21:23,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778543374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778543375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778543377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778543377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778543378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741858_1034 (size=12001) 2024-11-28T07:21:23,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/60ed42845dfc4f32b83f83646e682330 2024-11-28T07:21:23,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/de266d2c5bdc4624a875bb7f1418dbd5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5 2024-11-28T07:21:23,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:23,414 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5, entries=200, sequenceid=110, filesize=14.0 K 2024-11-28T07:21:23,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/743691d392254245a5acd997b7b8fd52 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52 2024-11-28T07:21:23,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52, entries=150, sequenceid=110, filesize=11.7 K 2024-11-28T07:21:23,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/60ed42845dfc4f32b83f83646e682330 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330 2024-11-28T07:21:23,441 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/79c1ff1f2e004bfcb00faa097ac869c0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/79c1ff1f2e004bfcb00faa097ac869c0 2024-11-28T07:21:23,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330, entries=150, sequenceid=110, filesize=11.7 K 2024-11-28T07:21:23,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for af0c88dc7f2cd28f9a7271a3bc766683 in 225ms, sequenceid=110, compaction requested=true 2024-11-28T07:21:23,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:23,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:23,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:23,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T07:21:23,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:23,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-28T07:21:23,468 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/6054179a711e499fbf2aeef79121ad1f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6054179a711e499fbf2aeef79121ad1f 2024-11-28T07:21:23,470 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 79c1ff1f2e004bfcb00faa097ac869c0(size=11.9 K), total size for store is 33.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:23,471 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,471 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=12, startTime=1732778482919; duration=0sec 2024-11-28T07:21:23,472 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-28T07:21:23,472 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:23,472 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 4 compacting, 2 eligible, 16 blocking 2024-11-28T07:21:23,474 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:23,475 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:21:23,475 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:21:23,475 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. because compaction request was cancelled 2024-11-28T07:21:23,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-28T07:21:23,475 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:23,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:23,475 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-28T07:21:23,476 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:21:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:23,482 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 6054179a711e499fbf2aeef79121ad1f(size=11.9 K), total size for store is 35.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:23,482 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,482 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=12, startTime=1732778482917; duration=0sec 2024-11-28T07:21:23,482 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:23,482 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:23,483 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:23,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d7e32d8b86944f7184d437c1e4ffbc5c is 50, key is test_row_0/A:col10/1732778483261/Put/seqid=0 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 69662 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:23,487 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 6 compacting, 0 eligible, 16 blocking 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
because compaction request was cancelled 2024-11-28T07:21:23,487 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:23,488 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:23,488 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=68.0 K 2024-11-28T07:21:23,491 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a6a0316196634f89a7708516218e5626, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778480649 2024-11-28T07:21:23,492 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:23,492 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:23,492 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:23,492 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/79c1ff1f2e004bfcb00faa097ac869c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=33.0 K 2024-11-28T07:21:23,493 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d34df5b7ce1e43bd945df01bb4239692, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732778480826 2024-11-28T07:21:23,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ed95a9a850e54a509414dee1a0f70766, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732778481241 2024-11-28T07:21:23,495 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79c1ff1f2e004bfcb00faa097ac869c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778482566 2024-11-28T07:21:23,496 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30c16a32eda648e3b512756cba548543, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732778482700 2024-11-28T07:21:23,497 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 081678340c0e45cba54ab58540e5097f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778482566 2024-11-28T07:21:23,498 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 580ed7a2b6ca4444aaf2a1d58b792b24, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732778482700 2024-11-28T07:21:23,499 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 743691d392254245a5acd997b7b8fd52, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732778483187 2024-11-28T07:21:23,499 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 60ed42845dfc4f32b83f83646e682330, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732778483187 2024-11-28T07:21:23,529 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:23,530 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ff3bc399d2b744ed87632549e61474ac is 50, key is test_row_0/B:col10/1732778483187/Put/seqid=0 2024-11-28T07:21:23,539 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#22 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:23,540 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/308092e494df4d778923dc8b95e445f4 is 50, key is test_row_0/C:col10/1732778483187/Put/seqid=0 2024-11-28T07:21:23,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741859_1035 (size=12001) 2024-11-28T07:21:23,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741860_1036 (size=12241) 2024-11-28T07:21:23,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741861_1037 (size=12207) 2024-11-28T07:21:23,582 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ff3bc399d2b744ed87632549e61474ac as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ff3bc399d2b744ed87632549e61474ac 2024-11-28T07:21:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:23,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:23,590 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/308092e494df4d778923dc8b95e445f4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/308092e494df4d778923dc8b95e445f4 2024-11-28T07:21:23,596 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into ff3bc399d2b744ed87632549e61474ac(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:23,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,596 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778483450; duration=0sec 2024-11-28T07:21:23,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:23,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:23,605 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 308092e494df4d778923dc8b95e445f4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:23,605 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:23,605 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=10, startTime=1732778483451; duration=0sec 2024-11-28T07:21:23,605 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:23,605 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:23,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:23,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778543630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778543635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778543635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778543638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778543639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778543741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778543744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778543745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778543746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778543746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:23,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778543950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778543952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778543952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,954 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d7e32d8b86944f7184d437c1e4ffbc5c 2024-11-28T07:21:23,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778543955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:23,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778543956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:23,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ce50fe3a443843038474cf3ef149d8d4 is 50, key is test_row_0/B:col10/1732778483261/Put/seqid=0 2024-11-28T07:21:24,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741862_1038 (size=12001) 2024-11-28T07:21:24,024 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ce50fe3a443843038474cf3ef149d8d4 2024-11-28T07:21:24,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d46d2ea8a7404a54ad92d5529ab9af4c is 50, key is test_row_0/C:col10/1732778483261/Put/seqid=0 2024-11-28T07:21:24,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741863_1039 (size=12001) 2024-11-28T07:21:24,238 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T07:21:24,239 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-28T07:21:24,242 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-28T07:21:24,243 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-28T07:21:24,246 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T07:21:24,246 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-28T07:21:24,246 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-28T07:21:24,246 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-28T07:21:24,248 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T07:21:24,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-28T07:21:24,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778544256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778544257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778544260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778544261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778544277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:24,485 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d46d2ea8a7404a54ad92d5529ab9af4c 2024-11-28T07:21:24,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d7e32d8b86944f7184d437c1e4ffbc5c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c 2024-11-28T07:21:24,513 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c, entries=150, sequenceid=124, filesize=11.7 K 2024-11-28T07:21:24,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ce50fe3a443843038474cf3ef149d8d4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4 2024-11-28T07:21:24,532 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4, entries=150, sequenceid=124, filesize=11.7 K 2024-11-28T07:21:24,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d46d2ea8a7404a54ad92d5529ab9af4c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c 2024-11-28T07:21:24,548 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c, entries=150, sequenceid=124, filesize=11.7 K 2024-11-28T07:21:24,557 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for af0c88dc7f2cd28f9a7271a3bc766683 in 1082ms, sequenceid=124, compaction requested=true 2024-11-28T07:21:24,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-28T07:21:24,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-28T07:21:24,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-28T07:21:24,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2390 sec 2024-11-28T07:21:24,566 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.2560 sec 2024-11-28T07:21:24,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:21:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:24,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:24,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778544785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/893fc21154bc468cb812692927e40832 is 50, key is test_row_0/A:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778544783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778544787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778544792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778544793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741864_1040 (size=12151) 2024-11-28T07:21:24,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/893fc21154bc468cb812692927e40832 2024-11-28T07:21:24,871 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/afd7edd1c6524843986906c6d3d59ad3 is 50, key is test_row_0/B:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:24,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778544895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778544901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778544901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:24,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778544902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:24,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741865_1041 (size=12151) 2024-11-28T07:21:25,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778545099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778545107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778545108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778545111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/afd7edd1c6524843986906c6d3d59ad3 2024-11-28T07:21:25,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f28eeb327b344b58bacf2a9c22b23d5b is 50, key is test_row_0/C:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:25,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741866_1042 (size=12151) 2024-11-28T07:21:25,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f28eeb327b344b58bacf2a9c22b23d5b 2024-11-28T07:21:25,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/893fc21154bc468cb812692927e40832 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832 2024-11-28T07:21:25,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832, entries=150, sequenceid=153, filesize=11.9 K 2024-11-28T07:21:25,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/afd7edd1c6524843986906c6d3d59ad3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3 2024-11-28T07:21:25,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3, entries=150, sequenceid=153, filesize=11.9 K 2024-11-28T07:21:25,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f28eeb327b344b58bacf2a9c22b23d5b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b 2024-11-28T07:21:25,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778545403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778545414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T07:21:25,419 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-28T07:21:25,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778545415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:25,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778545415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b, entries=150, sequenceid=153, filesize=11.9 K 2024-11-28T07:21:25,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-28T07:21:25,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for af0c88dc7f2cd28f9a7271a3bc766683 in 655ms, sequenceid=153, compaction requested=true 2024-11-28T07:21:25,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:25,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:25,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:25,428 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-28T07:21:25,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:25,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:25,429 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:25,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:25,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:25,429 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:25,430 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:25,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:25,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:25,433 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:25,433 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:25,433 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:25,433 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ff3bc399d2b744ed87632549e61474ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=35.5 K 2024-11-28T07:21:25,434 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ff3bc399d2b744ed87632549e61474ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732778483187 2024-11-28T07:21:25,435 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60289 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-28T07:21:25,435 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:25,435 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:25,435 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6054179a711e499fbf2aeef79121ad1f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=58.9 K 2024-11-28T07:21:25,436 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ce50fe3a443843038474cf3ef149d8d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732778483236 2024-11-28T07:21:25,436 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6054179a711e499fbf2aeef79121ad1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778482566 2024-11-28T07:21:25,437 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting afd7edd1c6524843986906c6d3d59ad3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:25,439 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecb0b94a509a46b2ba7b82545a1adfc5, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732778482700 2024-11-28T07:21:25,440 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting de266d2c5bdc4624a875bb7f1418dbd5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732778483119 2024-11-28T07:21:25,441 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7e32d8b86944f7184d437c1e4ffbc5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732778483236 2024-11-28T07:21:25,442 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 893fc21154bc468cb812692927e40832, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:25,465 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:25,466 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/93ee609f9ad44f8093fbb8d65ceb3943 is 50, key is test_row_0/B:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:25,489 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#29 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:25,490 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d81ba174d33a47178ac3ade3880e3b68 is 50, key is test_row_0/A:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:25,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741868_1044 (size=12459) 2024-11-28T07:21:25,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741867_1043 (size=12493) 2024-11-28T07:21:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:25,538 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d81ba174d33a47178ac3ade3880e3b68 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d81ba174d33a47178ac3ade3880e3b68 2024-11-28T07:21:25,549 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into d81ba174d33a47178ac3ade3880e3b68(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:25,549 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:25,550 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=11, startTime=1732778485428; duration=0sec 2024-11-28T07:21:25,550 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:25,550 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:25,550 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:25,553 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:25,553 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:25,553 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:25,553 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/308092e494df4d778923dc8b95e445f4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=35.5 K 2024-11-28T07:21:25,554 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 308092e494df4d778923dc8b95e445f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1732778483187 2024-11-28T07:21:25,555 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d46d2ea8a7404a54ad92d5529ab9af4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1732778483236 2024-11-28T07:21:25,555 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f28eeb327b344b58bacf2a9c22b23d5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:25,582 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:25,583 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8251f7dd41b94344b70cfea032f2f3ed is 50, key is test_row_0/C:col10/1732778483633/Put/seqid=0 2024-11-28T07:21:25,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:25,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T07:21:25,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:25,586 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T07:21:25,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:25,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:25,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:25,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:25,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:25,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:25,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/636be1ff13954dff9b4f30e8e0eb6f57 is 50, key is test_row_0/A:col10/1732778484785/Put/seqid=0 2024-11-28T07:21:25,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741869_1045 (size=12459) 2024-11-28T07:21:25,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741870_1046 
(size=12151) 2024-11-28T07:21:25,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:25,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:25,937 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/93ee609f9ad44f8093fbb8d65ceb3943 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93ee609f9ad44f8093fbb8d65ceb3943 2024-11-28T07:21:25,950 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 93ee609f9ad44f8093fbb8d65ceb3943(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:25,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778545929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,951 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:25,951 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778485428; duration=0sec 2024-11-28T07:21:25,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778545933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,952 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:25,952 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:25,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778545951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778545952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:25,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:25,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778545953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,019 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8251f7dd41b94344b70cfea032f2f3ed as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8251f7dd41b94344b70cfea032f2f3ed 2024-11-28T07:21:26,037 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/636be1ff13954dff9b4f30e8e0eb6f57 2024-11-28T07:21:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:26,040 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 8251f7dd41b94344b70cfea032f2f3ed(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:26,040 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:26,041 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778485429; duration=0sec 2024-11-28T07:21:26,041 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:26,044 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:26,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778546056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/54e43af7ab8542d890fda3aed31ac13d is 50, key is test_row_0/B:col10/1732778484785/Put/seqid=0 2024-11-28T07:21:26,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778546061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778546061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778546064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741871_1047 (size=12151) 2024-11-28T07:21:26,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778546261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778546269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778546268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778546269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,489 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/54e43af7ab8542d890fda3aed31ac13d 2024-11-28T07:21:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d23eeb423f7940eda0bc204f5b827199 is 50, key is test_row_0/C:col10/1732778484785/Put/seqid=0 2024-11-28T07:21:26,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:26,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741872_1048 (size=12151) 2024-11-28T07:21:26,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778546566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778546575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778546574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778546575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,945 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d23eeb423f7940eda0bc204f5b827199 2024-11-28T07:21:26,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/636be1ff13954dff9b4f30e8e0eb6f57 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57 2024-11-28T07:21:26,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:26,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778546957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:26,965 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57, entries=150, sequenceid=164, filesize=11.9 K 2024-11-28T07:21:26,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/54e43af7ab8542d890fda3aed31ac13d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d 2024-11-28T07:21:26,976 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d, entries=150, sequenceid=164, filesize=11.9 K 2024-11-28T07:21:26,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d23eeb423f7940eda0bc204f5b827199 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199 2024-11-28T07:21:26,987 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199, entries=150, sequenceid=164, filesize=11.9 K 2024-11-28T07:21:26,991 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=167.72 KB/171750 for af0c88dc7f2cd28f9a7271a3bc766683 in 1405ms, sequenceid=164, 
compaction requested=false 2024-11-28T07:21:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:26,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-28T07:21:26,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-28T07:21:26,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-28T07:21:26,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5610 sec 2024-11-28T07:21:26,999 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.5730 sec 2024-11-28T07:21:27,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:27,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-28T07:21:27,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:27,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:27,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:27,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:27,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:27,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:27,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778547089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/47b995c82fe6414daee7244eac4bd58b is 50, key is test_row_0/A:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:27,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778547090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778547092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778547096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741873_1049 (size=14541) 2024-11-28T07:21:27,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/47b995c82fe6414daee7244eac4bd58b 2024-11-28T07:21:27,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/2c88c6acc92546459c58369c19c37dcd is 50, key is test_row_0/B:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:27,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741874_1050 (size=12151) 2024-11-28T07:21:27,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778547197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778547197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778547198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778547200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778547404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778547405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778547405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778547405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T07:21:27,541 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-28T07:21:27,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:27,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-28T07:21:27,547 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:27,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/2c88c6acc92546459c58369c19c37dcd 2024-11-28T07:21:27,549 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:27,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:27,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:27,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0102591f9fca41fcb4137df5b167dc64 is 50, key is test_row_0/C:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:27,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741875_1051 (size=12151) 2024-11-28T07:21:27,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:27,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:27,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T07:21:27,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:27,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:27,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:27,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778547708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778547709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778547710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778547711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:27,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:27,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T07:21:27,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:27,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:27,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:27,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:27,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0102591f9fca41fcb4137df5b167dc64 2024-11-28T07:21:28,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/47b995c82fe6414daee7244eac4bd58b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b 2024-11-28T07:21:28,012 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:28,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b, entries=200, sequenceid=195, filesize=14.2 K 2024-11-28T07:21:28,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T07:21:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:28,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:28,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:28,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:28,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:28,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/2c88c6acc92546459c58369c19c37dcd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd 2024-11-28T07:21:28,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T07:21:28,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0102591f9fca41fcb4137df5b167dc64 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64 2024-11-28T07:21:28,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T07:21:28,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for af0c88dc7f2cd28f9a7271a3bc766683 in 977ms, sequenceid=195, compaction requested=true 2024-11-28T07:21:28,053 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:28,054 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:28,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:28,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:28,054 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:28,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:28,056 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:28,056 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:28,057 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:28,057 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93ee609f9ad44f8093fbb8d65ceb3943, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=35.9 K 2024-11-28T07:21:28,057 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:28,057 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:28,057 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:28,058 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d81ba174d33a47178ac3ade3880e3b68, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=38.2 K 2024-11-28T07:21:28,058 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 93ee609f9ad44f8093fbb8d65ceb3943, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:28,058 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d81ba174d33a47178ac3ade3880e3b68, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:28,058 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 54e43af7ab8542d890fda3aed31ac13d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732778484782 2024-11-28T07:21:28,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:28,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:28,059 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c88c6acc92546459c58369c19c37dcd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:28,059 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 636be1ff13954dff9b4f30e8e0eb6f57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732778484782 2024-11-28T07:21:28,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:28,060 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47b995c82fe6414daee7244eac4bd58b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:28,077 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:28,078 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/5da185e8eb194c219007fa4c09c002ec is 50, key is test_row_0/B:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:28,094 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#38 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:28,095 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1d27d455ea35423496768c4cab987ba0 is 50, key is test_row_0/A:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:28,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741876_1052 (size=12595) 2024-11-28T07:21:28,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:28,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741877_1053 (size=12561) 2024-11-28T07:21:28,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:28,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T07:21:28,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:28,171 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:28,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,200 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/1d27d455ea35423496768c4cab987ba0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1d27d455ea35423496768c4cab987ba0 2024-11-28T07:21:28,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/b5507d8b00154c91bdb1cbb80310dee7 is 50, key is test_row_0/A:col10/1732778487088/Put/seqid=0 2024-11-28T07:21:28,219 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 1d27d455ea35423496768c4cab987ba0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:28,219 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:28,219 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778488054; duration=0sec 2024-11-28T07:21:28,219 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:28,220 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:28,220 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:28,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:28,223 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:28,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:28,224 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:28,224 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8251f7dd41b94344b70cfea032f2f3ed, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=35.9 K 2024-11-28T07:21:28,225 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8251f7dd41b94344b70cfea032f2f3ed, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1732778483633 2024-11-28T07:21:28,225 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d23eeb423f7940eda0bc204f5b827199, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732778484782 2024-11-28T07:21:28,226 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0102591f9fca41fcb4137df5b167dc64, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:28,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741878_1054 (size=9757) 2024-11-28T07:21:28,252 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/b5507d8b00154c91bdb1cbb80310dee7 2024-11-28T07:21:28,261 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#40 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:28,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/a1c58ecabbb540778dc16dc378e99d17 is 50, key is test_row_0/C:col10/1732778487074/Put/seqid=0 2024-11-28T07:21:28,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/884d0f00c23c446b96e5b8f497138eb9 is 50, key is test_row_0/B:col10/1732778487088/Put/seqid=0 2024-11-28T07:21:28,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741879_1055 (size=12561) 2024-11-28T07:21:28,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778548269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778548277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778548278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778548279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741880_1056 (size=9757) 2024-11-28T07:21:28,304 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/884d0f00c23c446b96e5b8f497138eb9 2024-11-28T07:21:28,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa143092d9734f77a602f9b91765c1f8 is 50, key is test_row_0/C:col10/1732778487088/Put/seqid=0 2024-11-28T07:21:28,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741881_1057 (size=9757) 2024-11-28T07:21:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778548382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778548385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778548386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778548387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,517 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/5da185e8eb194c219007fa4c09c002ec as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5da185e8eb194c219007fa4c09c002ec 2024-11-28T07:21:28,526 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 5da185e8eb194c219007fa4c09c002ec(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:28,527 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:28,527 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778488054; duration=0sec 2024-11-28T07:21:28,527 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:28,527 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:28,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778548588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778548588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778548588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778548592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:28,685 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/a1c58ecabbb540778dc16dc378e99d17 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a1c58ecabbb540778dc16dc378e99d17 2024-11-28T07:21:28,701 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into a1c58ecabbb540778dc16dc378e99d17(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
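[editor's aside, not part of the test output] The run above shows the region server repeatedly rejecting Mutate RPCs with RegionTooBusyException ("Over memstore limit=512.0 K") while flushes and compactions drain the memstore; the 512 K figure is consistent with hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, though the exact values used by this test are not visible in this excerpt. The sketch below is a minimal, assumed illustration (class name BackoffWriter, attempt bound, and backoff values are invented for the example; table, row, family, and qualifier names are taken from the log) of how an application-level writer could back off on that signal. It is not the AcidGuaranteesTestTool's actual code, which relies on the client's built-in retries shown later in this log.

// Hedged sketch only: back off when a put is rejected with RegionTooBusyException.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100L;          // illustrative starting backoff
      int maxAttempts = 10;           // illustrative bound on application-level retries

      for (int attempt = 1; attempt <= maxAttempts; attempt++) {
        try {
          table.put(put);             // the client also retries internally (RpcRetryingCallerImpl)
          return;                     // write accepted
        } catch (IOException e) {
          // The busy condition can surface directly, or wrapped once the client's own
          // retries give up (see the RpcRetryingCallerImpl entry later in this log).
          boolean regionBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!regionBusy || attempt == maxAttempts) {
            throw e;                  // not a memstore pushback, or out of attempts
          }
          Thread.sleep(backoffMs);    // give flushes/compactions time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}

In practice the blocking threshold is tuned server-side through hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier rather than handled purely in the client; the sketch only illustrates the client-visible contract of the exception seen throughout this section.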
2024-11-28T07:21:28,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:28,701 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778488059; duration=0sec 2024-11-28T07:21:28,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:28,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:28,739 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa143092d9734f77a602f9b91765c1f8 2024-11-28T07:21:28,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/b5507d8b00154c91bdb1cbb80310dee7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7 2024-11-28T07:21:28,758 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7, entries=100, sequenceid=203, filesize=9.5 K 2024-11-28T07:21:28,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/884d0f00c23c446b96e5b8f497138eb9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9 2024-11-28T07:21:28,769 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9, entries=100, sequenceid=203, filesize=9.5 K 2024-11-28T07:21:28,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa143092d9734f77a602f9b91765c1f8 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8 2024-11-28T07:21:28,800 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8, entries=100, sequenceid=203, filesize=9.5 K 2024-11-28T07:21:28,802 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for af0c88dc7f2cd28f9a7271a3bc766683 in 630ms, sequenceid=203, compaction requested=false 2024-11-28T07:21:28,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:28,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:28,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-28T07:21:28,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-28T07:21:28,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-28T07:21:28,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2550 sec 2024-11-28T07:21:28,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.2650 sec 2024-11-28T07:21:28,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:28,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:28,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:28,905 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778548902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778548902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778548903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778548906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3b197017e1c34778b1294cb707d8af61 is 50, key is test_row_0/A:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741882_1058 (size=12151) 2024-11-28T07:21:28,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3b197017e1c34778b1294cb707d8af61 2024-11-28T07:21:28,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c830dc83571c46efb09e05ba589c2705 is 50, key is test_row_0/B:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:28,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:28,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778548976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:28,978 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4186 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:28,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741883_1059 (size=12151) 2024-11-28T07:21:28,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c830dc83571c46efb09e05ba589c2705 2024-11-28T07:21:28,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8decff08d24e4e8fa73b4b7dcc4a82ee is 50, key is test_row_0/C:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:29,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741884_1060 (size=12151) 2024-11-28T07:21:29,028 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8decff08d24e4e8fa73b4b7dcc4a82ee 2024-11-28T07:21:29,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3b197017e1c34778b1294cb707d8af61 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61 2024-11-28T07:21:29,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61, entries=150, sequenceid=237, filesize=11.9 K 2024-11-28T07:21:29,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c830dc83571c46efb09e05ba589c2705 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705 2024-11-28T07:21:29,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705, entries=150, sequenceid=237, filesize=11.9 K 2024-11-28T07:21:29,064 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8decff08d24e4e8fa73b4b7dcc4a82ee as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee 2024-11-28T07:21:29,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee, entries=150, sequenceid=237, filesize=11.9 K 2024-11-28T07:21:29,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for af0c88dc7f2cd28f9a7271a3bc766683 in 178ms, sequenceid=237, compaction requested=true 2024-11-28T07:21:29,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:29,077 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,077 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:29,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:29,078 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,078 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,078 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor 
compaction (all files) 2024-11-28T07:21:29,078 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:29,079 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:29,079 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:29,079 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5da185e8eb194c219007fa4c09c002ec, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=33.7 K 2024-11-28T07:21:29,079 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1d27d455ea35423496768c4cab987ba0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=33.7 K 2024-11-28T07:21:29,079 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5da185e8eb194c219007fa4c09c002ec, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:29,080 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d27d455ea35423496768c4cab987ba0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:29,080 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 884d0f00c23c446b96e5b8f497138eb9, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732778487086 2024-11-28T07:21:29,080 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5507d8b00154c91bdb1cbb80310dee7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732778487086 2024-11-28T07:21:29,081 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] 
compactions.Compactor(224): Compacting c830dc83571c46efb09e05ba589c2705, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,081 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b197017e1c34778b1294cb707d8af61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,111 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,111 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/25df1fa4d0f14238a1be248c4a917cef is 50, key is test_row_0/B:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:29,114 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#47 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,115 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/872a584ac1a84e65bc108afb12056b42 is 50, key is test_row_0/A:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:29,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741885_1061 (size=12697) 2024-11-28T07:21:29,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741886_1062 (size=12663) 2024-11-28T07:21:29,146 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/872a584ac1a84e65bc108afb12056b42 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/872a584ac1a84e65bc108afb12056b42 2024-11-28T07:21:29,155 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 872a584ac1a84e65bc108afb12056b42(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
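The RegionTooBusyException entries above are HRegion.checkResources rejecting Mutate RPCs while this region's memstore is over its 512.0 K blocking limit; callers are expected to back off and retry until a flush (sequenceid=237 above) drains the memstore. Below is a minimal client-side sketch of that pattern against the TestAcidGuarantees table and the test_row_0/A:col10 cell named in the log; the retry budget, backoff, and cell value are assumptions, and the stock HBase client already performs comparable retries internally, so a failed put may surface as a RetriesExhaustedException wrapping this exception rather than as a bare RegionTooBusyException.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {

  // Retry a single put with a short linear backoff whenever the server reports
  // that the region is over its memstore blocking limit.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1;; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (attempt >= maxAttempts) {
          throw busy;                 // bounded retries: give up eventually
        }
        Thread.sleep(100L * attempt); // let MemStoreFlusher catch up before retrying
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A", qualifier "col10" as in the flush output; the value is arbitrary.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      putWithBackoff(table, put, 5);
    }
  }
}
```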
2024-11-28T07:21:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,155 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778489077; duration=0sec 2024-11-28T07:21:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,157 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,157 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:29,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:29,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a1c58ecabbb540778dc16dc378e99d17, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=33.7 K 2024-11-28T07:21:29,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1c58ecabbb540778dc16dc378e99d17, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778485924 2024-11-28T07:21:29,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa143092d9734f77a602f9b91765c1f8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732778487086 2024-11-28T07:21:29,160 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8decff08d24e4e8fa73b4b7dcc4a82ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,176 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,177 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/88373fad88374dcb86a345dcc5bd9b18 is 50, key is test_row_0/C:col10/1732778488898/Put/seqid=0 2024-11-28T07:21:29,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741887_1063 (size=12663) 2024-11-28T07:21:29,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:29,224 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:29,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/66aa061ef35b490f92f0ab1358d58ea9 is 50, key is test_row_0/A:col10/1732778489222/Put/seqid=0 2024-11-28T07:21:29,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741888_1064 (size=14541) 2024-11-28T07:21:29,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/66aa061ef35b490f92f0ab1358d58ea9 2024-11-28T07:21:29,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e2573f6bd9c044f69e7cc247c623c7aa is 50, key is test_row_0/B:col10/1732778489222/Put/seqid=0 2024-11-28T07:21:29,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741889_1065 (size=12151) 2024-11-28T07:21:29,305 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e2573f6bd9c044f69e7cc247c623c7aa 2024-11-28T07:21:29,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d461b0941cdb405fa2f1285bae2df13b is 50, key is test_row_0/C:col10/1732778489222/Put/seqid=0 2024-11-28T07:21:29,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741890_1066 (size=12151) 2024-11-28T07:21:29,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d461b0941cdb405fa2f1285bae2df13b 2024-11-28T07:21:29,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/66aa061ef35b490f92f0ab1358d58ea9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9 2024-11-28T07:21:29,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9, entries=200, sequenceid=250, filesize=14.2 K 2024-11-28T07:21:29,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e2573f6bd9c044f69e7cc247c623c7aa as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa 2024-11-28T07:21:29,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa, entries=150, sequenceid=250, filesize=11.9 K 2024-11-28T07:21:29,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d461b0941cdb405fa2f1285bae2df13b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b 2024-11-28T07:21:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b, entries=150, sequenceid=250, filesize=11.9 K 2024-11-28T07:21:29,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for af0c88dc7f2cd28f9a7271a3bc766683 in 167ms, sequenceid=250, compaction requested=false 2024-11-28T07:21:29,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,541 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/25df1fa4d0f14238a1be248c4a917cef as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/25df1fa4d0f14238a1be248c4a917cef 2024-11-28T07:21:29,550 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 25df1fa4d0f14238a1be248c4a917cef(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
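The 512.0 K figure in every RegionTooBusyException above is the per-region blocking threshold enforced by HRegion.checkResources: the memstore flush size multiplied by the block multiplier. A small sketch of the two settings involved follows; the 128 KB flush size is an assumed test-scale value chosen so that the default multiplier of 4 reproduces the 512 K limit seen here, not a value read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region flush threshold (production default is 128 MB; 128 KB here is
    // an assumption sized for a unit test).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Updates are rejected with RegionTooBusyException once the region's memstore
    // reaches flush.size * block.multiplier: 128 KB * 4 = 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
  }
}
```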
2024-11-28T07:21:29,550 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,550 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778489077; duration=0sec 2024-11-28T07:21:29,550 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,550 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:29,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:21:29,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:29,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:29,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:29,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3a04a66bd8c24bffa387a3038fa0bac0 is 50, key is test_row_0/A:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,605 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/88373fad88374dcb86a345dcc5bd9b18 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/88373fad88374dcb86a345dcc5bd9b18 2024-11-28T07:21:29,620 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 88373fad88374dcb86a345dcc5bd9b18(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
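Each flush above adds one HFile to each of stores A, B and C, so once three flushed files have accumulated per store the SortedCompactionPolicy/ExploringCompactionPolicy lines report "3 store files, ... 3 eligible, 16 blocking" and select all three for a minor compaction. A hedged sketch of the configuration knobs behind those numbers; the values shown are the usual defaults (and 16 matches the blocking store-file count reported in the selection lines), not settings confirmed from this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum number of eligible files before a minor compaction is scheduled;
    // three flushed HFiles per store is exactly enough to trigger selection.
    conf.setInt("hbase.hstore.compaction.min", 3);

    // Upper bound on files merged in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);

    // Size ratio used by ExploringCompactionPolicy when scoring candidate sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    // "16 blocking" in the selection log line: writes to the store stall once it
    // accumulates this many HFiles.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```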
2024-11-28T07:21:29,620 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,620 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778489077; duration=0sec 2024-11-28T07:21:29,620 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,620 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:29,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741891_1067 (size=17181) 2024-11-28T07:21:29,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3a04a66bd8c24bffa387a3038fa0bac0 2024-11-28T07:21:29,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bad3fa27a96543f3bd016fa1ce0ee1aa is 50, key is test_row_0/B:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T07:21:29,660 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-28T07:21:29,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-28T07:21:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T07:21:29,665 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:29,665 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:29,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:29,694 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741892_1068 (size=12301) 2024-11-28T07:21:29,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bad3fa27a96543f3bd016fa1ce0ee1aa 2024-11-28T07:21:29,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f31b3b7eb0b7442a99542b7b3d7cd39d is 50, key is test_row_0/C:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741893_1069 (size=12301) 2024-11-28T07:21:29,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f31b3b7eb0b7442a99542b7b3d7cd39d 2024-11-28T07:21:29,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3a04a66bd8c24bffa387a3038fa0bac0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0 2024-11-28T07:21:29,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0, entries=250, sequenceid=277, filesize=16.8 K 2024-11-28T07:21:29,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bad3fa27a96543f3bd016fa1ce0ee1aa as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa 2024-11-28T07:21:29,754 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T07:21:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/f31b3b7eb0b7442a99542b7b3d7cd39d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d 2024-11-28T07:21:29,763 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T07:21:29,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for af0c88dc7f2cd28f9a7271a3bc766683 in 173ms, sequenceid=277, compaction requested=true 2024-11-28T07:21:29,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T07:21:29,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:29,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,765 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,765 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:29,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:29,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:29,767 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 
starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,767 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:29,767 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:29,767 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/25df1fa4d0f14238a1be248c4a917cef, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.3 K 2024-11-28T07:21:29,768 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44385 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,768 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:29,768 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
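
[Editor's note] The two compaction threads above each select 3 eligible store files through ExploringCompactionPolicy ("1 permutations with 1 in ratio"). As a rough illustration of the ratio test that family of policies applies (a file stays in a candidate set only if it is not much larger than the rest of the set combined), here is a minimal, self-contained sketch. The class name, the plain long sizes and the 1.2 ratio are illustrative assumptions, not the HBase source.

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of a ratio-based compaction selection test.
// NOT the HBase ExploringCompactionPolicy implementation.
public class RatioSelectionSketch {

    /** A candidate set is acceptable if every file is at most `ratio` times
     *  the combined size of the other files in the set. */
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly like the 12.4 K + 11.9 K + 12.0 K selection in the log.
        List<Long> candidate = new ArrayList<>(List.of(12_698L, 12_186L, 12_301L));
        System.out.println("select for compaction? " + withinRatio(candidate, 1.2));
    }
}
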
2024-11-28T07:21:29,768 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/872a584ac1a84e65bc108afb12056b42, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=43.3 K 2024-11-28T07:21:29,768 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 25df1fa4d0f14238a1be248c4a917cef, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,768 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 872a584ac1a84e65bc108afb12056b42, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,769 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e2573f6bd9c044f69e7cc247c623c7aa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732778488900 2024-11-28T07:21:29,769 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66aa061ef35b490f92f0ab1358d58ea9, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732778488900 2024-11-28T07:21:29,769 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bad3fa27a96543f3bd016fa1ce0ee1aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489275 2024-11-28T07:21:29,769 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a04a66bd8c24bffa387a3038fa0bac0, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489259 2024-11-28T07:21:29,791 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#55 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,793 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/f3669ad9cfcc43d89186c0abd6f35f5a is 50, key is test_row_0/A:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,797 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#56 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,799 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/6f23f5e747d24581b8419931dfbc6ef8 is 50, key is test_row_0/B:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:29,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-28T07:21:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:29,821 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T07:21:29,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:29,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:29,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741894_1070 (size=12915) 2024-11-28T07:21:29,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4755983048ff4de48c961ce9d3873f27 is 50, key is test_row_0/A:col10/1732778489600/Put/seqid=0 2024-11-28T07:21:29,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741895_1071 (size=12949) 2024-11-28T07:21:29,859 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/6f23f5e747d24581b8419931dfbc6ef8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6f23f5e747d24581b8419931dfbc6ef8 2024-11-28T07:21:29,868 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 6f23f5e747d24581b8419931dfbc6ef8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:29,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,868 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778489765; duration=0sec 2024-11-28T07:21:29,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:29,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:29,870 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:29,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:29,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:29,872 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
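
[Editor's note] The repeated "Committing .tmp/... as ..." lines show flush and compaction output first being written under the region's .tmp directory and only then moved into the column-family directory. Below is a minimal sketch of that write-then-rename pattern using the plain Hadoop FileSystem API; the paths and file name are placeholders, and the real HRegionFileSystem performs additional validation that is omitted here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write under .tmp, then rename into place" commit pattern
// visible in the log. Paths are placeholders; HRegionFileSystem adds checks
// (existence, same-filesystem move, etc.) that are not shown.
public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/B/hfile-123");
        Path storeFile = new Path("/data/default/SomeTable/region/B/hfile-123");

        // The flusher/compactor first writes the complete HFile under .tmp ...
        // (writing elided) ...
        // ... and only then makes it visible to readers by renaming it into the store.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}
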
2024-11-28T07:21:29,872 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/88373fad88374dcb86a345dcc5bd9b18, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.2 K 2024-11-28T07:21:29,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 88373fad88374dcb86a345dcc5bd9b18, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732778488277 2024-11-28T07:21:29,873 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d461b0941cdb405fa2f1285bae2df13b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732778488900 2024-11-28T07:21:29,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741896_1072 (size=12301) 2024-11-28T07:21:29,875 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4755983048ff4de48c961ce9d3873f27 2024-11-28T07:21:29,883 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f31b3b7eb0b7442a99542b7b3d7cd39d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489275 2024-11-28T07:21:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/5af7ba73fc464f4a85d49d0aa388ddc1 is 50, key is test_row_0/B:col10/1732778489600/Put/seqid=0 2024-11-28T07:21:29,906 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#59 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:29,907 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9f4d152d7caa4114ac0dc12838a49d22 is 50, key is test_row_0/C:col10/1732778489590/Put/seqid=0 2024-11-28T07:21:29,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741897_1073 (size=12301) 2024-11-28T07:21:29,910 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/5af7ba73fc464f4a85d49d0aa388ddc1 2024-11-28T07:21:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:29,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:29,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741898_1074 (size=12915) 2024-11-28T07:21:29,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/501022436163424484a0579633e149c8 is 50, key is test_row_0/C:col10/1732778489600/Put/seqid=0 2024-11-28T07:21:29,945 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9f4d152d7caa4114ac0dc12838a49d22 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9f4d152d7caa4114ac0dc12838a49d22 2024-11-28T07:21:29,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741899_1075 (size=12301) 2024-11-28T07:21:29,949 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/501022436163424484a0579633e149c8 2024-11-28T07:21:29,956 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 9f4d152d7caa4114ac0dc12838a49d22(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
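
[Editor's note] The "average throughput is ... slept 0 time(s) ... total limit is 50.00 MB/second" messages come from HBase's pressure-aware throughput control, which slows a compaction down when it writes faster than the configured limit. The sketch below is a generic rate limiter that captures only the idea; the class name, fields and the 50 MB/s constant are illustrative, and this is not the PressureAwareThroughputController implementation.

// Generic, simplified rate limiter illustrating the idea behind a
// throughput-controlled writer: if bytes are produced faster than the
// limit allows, sleep until the running average drops back under it.
// NOT PressureAwareThroughputController; names are illustrative.
public class SimpleThroughputLimiter {
    private final double bytesPerSecondLimit;
    private final long startNanos = System.nanoTime();
    private long bytesWritten = 0;

    public SimpleThroughputLimiter(double bytesPerSecondLimit) {
        this.bytesPerSecondLimit = bytesPerSecondLimit;
    }

    /** Call after writing `bytes`; sleeps if the average rate exceeds the limit. */
    public void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minimumSec = bytesWritten / bytesPerSecondLimit; // time needed to stay at the limit
        if (minimumSec > elapsedSec) {
            Thread.sleep((long) ((minimumSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024); // ~50 MB/s
        for (int i = 0; i < 10; i++) {
            // pretend we just wrote a 64 KB block of compaction output
            limiter.control(64 * 1024);
        }
    }
}
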
2024-11-28T07:21:29,956 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,956 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778489766; duration=0sec 2024-11-28T07:21:29,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4755983048ff4de48c961ce9d3873f27 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27 2024-11-28T07:21:29,956 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:29,956 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:29,965 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27, entries=150, sequenceid=288, filesize=12.0 K 2024-11-28T07:21:29,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T07:21:29,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/5af7ba73fc464f4a85d49d0aa388ddc1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1 2024-11-28T07:21:29,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778549962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778549963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778549963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:29,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778549964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:29,972 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1, entries=150, sequenceid=288, filesize=12.0 K 2024-11-28T07:21:29,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/501022436163424484a0579633e149c8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8 2024-11-28T07:21:29,980 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8, entries=150, sequenceid=288, filesize=12.0 K 2024-11-28T07:21:29,981 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for af0c88dc7f2cd28f9a7271a3bc766683 in 160ms, sequenceid=288, compaction requested=false 2024-11-28T07:21:29,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:29,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
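
[Editor's note] The recurring "RegionTooBusyException: Over memstore limit=512.0 K" entries mean writes are being rejected while the region's memstore sits above its blocking threshold (flush size times the block multiplier); the flushes interleaved in the log are what bring it back under. A hedged client-side sketch follows: the table name and row/column names are taken from the test output, but the retry count, backoff and the direct catch of RegionTooBusyException are simplifying assumptions; in practice the stock HBase client usually retries such calls itself and may surface the failure wrapped in a retries-exhausted exception.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Simplified manual retry around a put that may hit RegionTooBusyException.
// Retry count and backoff are illustrative; the HBase client normally
// performs this kind of retry internally.
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // success
                } catch (RegionTooBusyException busy) {
                    // Region is over its memstore blocking limit; back off and retry.
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}
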
2024-11-28T07:21:29,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-28T07:21:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-28T07:21:29,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-28T07:21:29,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 317 msec 2024-11-28T07:21:29,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 324 msec 2024-11-28T07:21:30,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:30,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:30,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 is 50, key is test_row_0/A:col10/1732778489962/Put/seqid=0 2024-11-28T07:21:30,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741900_1076 (size=12301) 2024-11-28T07:21:30,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 2024-11-28T07:21:30,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/599bec92658447e48e849f55a1d571f0 is 50, key is test_row_0/B:col10/1732778489962/Put/seqid=0 2024-11-28T07:21:30,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741901_1077 (size=12301) 2024-11-28T07:21:30,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/599bec92658447e48e849f55a1d571f0 2024-11-28T07:21:30,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/3ebdebe8c6594d33894dd77fb915e39b is 50, key is test_row_0/C:col10/1732778489962/Put/seqid=0 2024-11-28T07:21:30,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741902_1078 (size=12301) 2024-11-28T07:21:30,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/3ebdebe8c6594d33894dd77fb915e39b 2024-11-28T07:21:30,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 2024-11-28T07:21:30,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8, entries=150, sequenceid=318, filesize=12.0 K 2024-11-28T07:21:30,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/599bec92658447e48e849f55a1d571f0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0 2024-11-28T07:21:30,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0, entries=150, sequenceid=318, filesize=12.0 K 2024-11-28T07:21:30,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/3ebdebe8c6594d33894dd77fb915e39b as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b 2024-11-28T07:21:30,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b, entries=150, sequenceid=318, filesize=12.0 K 2024-11-28T07:21:30,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for af0c88dc7f2cd28f9a7271a3bc766683 in 181ms, sequenceid=318, compaction requested=true 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:30,253 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:30,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:30,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:21:30,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:21:30,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
because compaction request was cancelled 2024-11-28T07:21:30,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:30,255 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:30,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:30,257 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:30,257 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:30,257 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6f23f5e747d24581b8419931dfbc6ef8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.7 K 2024-11-28T07:21:30,257 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f23f5e747d24581b8419931dfbc6ef8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489275 2024-11-28T07:21:30,258 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5af7ba73fc464f4a85d49d0aa388ddc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732778489598 2024-11-28T07:21:30,259 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/f3669ad9cfcc43d89186c0abd6f35f5a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f3669ad9cfcc43d89186c0abd6f35f5a 2024-11-28T07:21:30,259 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 599bec92658447e48e849f55a1d571f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732778489960 2024-11-28T07:21:30,267 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into f3669ad9cfcc43d89186c0abd6f35f5a(size=12.6 K), total size 
for store is 36.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:30,267 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,267 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778489765; duration=0sec 2024-11-28T07:21:30,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T07:21:30,267 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:30,267 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:30,268 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:30,268 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-28T07:21:30,269 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:30,269 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:30,269 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:30,269 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9f4d152d7caa4114ac0dc12838a49d22, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.6 K 2024-11-28T07:21:30,270 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#64 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:30,270 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f4d152d7caa4114ac0dc12838a49d22, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489275 2024-11-28T07:21:30,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:30,270 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d4a191a1af6b403ab3f2693ba5e4b666 is 50, key is test_row_0/B:col10/1732778489962/Put/seqid=0 2024-11-28T07:21:30,272 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 501022436163424484a0579633e149c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732778489598 2024-11-28T07:21:30,272 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ebdebe8c6594d33894dd77fb915e39b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732778489960 2024-11-28T07:21:30,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-28T07:21:30,274 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:30,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T07:21:30,275 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:30,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:30,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741903_1079 (size=13051) 2024-11-28T07:21:30,290 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#65 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:30,291 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/36a1755b02c941fcb07977e30e7d1c6b is 50, key is test_row_0/C:col10/1732778489962/Put/seqid=0 2024-11-28T07:21:30,296 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d4a191a1af6b403ab3f2693ba5e4b666 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d4a191a1af6b403ab3f2693ba5e4b666 2024-11-28T07:21:30,304 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into d4a191a1af6b403ab3f2693ba5e4b666(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:30,304 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,304 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778490253; duration=0sec 2024-11-28T07:21:30,305 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,305 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:30,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741904_1080 (size=13017) 2024-11-28T07:21:30,317 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/36a1755b02c941fcb07977e30e7d1c6b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/36a1755b02c941fcb07977e30e7d1c6b 2024-11-28T07:21:30,325 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 36a1755b02c941fcb07977e30e7d1c6b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:30,325 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,325 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778490253; duration=0sec 2024-11-28T07:21:30,325 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,325 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T07:21:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:30,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:21:30,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:30,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:30,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:30,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/0978f99079a648a6991ca008712bb8f9 is 50, key is test_row_0/A:col10/1732778490080/Put/seqid=0 2024-11-28T07:21:30,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:30,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T07:21:30,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:30,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
as already flushing 2024-11-28T07:21:30,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:30,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:30,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:30,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741905_1081 (size=12301) 2024-11-28T07:21:30,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/0978f99079a648a6991ca008712bb8f9 2024-11-28T07:21:30,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/02328af6c2534521938f2a17d660d9f1 is 50, key is test_row_0/B:col10/1732778490080/Put/seqid=0 2024-11-28T07:21:30,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741906_1082 (size=12301) 2024-11-28T07:21:30,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/02328af6c2534521938f2a17d660d9f1 2024-11-28T07:21:30,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d25d6f7e40d245eba818d78aed5e8bbc is 50, key is test_row_0/C:col10/1732778490080/Put/seqid=0 2024-11-28T07:21:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741907_1083 (size=12301) 2024-11-28T07:21:30,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d25d6f7e40d245eba818d78aed5e8bbc 2024-11-28T07:21:30,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/0978f99079a648a6991ca008712bb8f9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9 2024-11-28T07:21:30,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9, entries=150, sequenceid=333, filesize=12.0 K 
2024-11-28T07:21:30,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/02328af6c2534521938f2a17d660d9f1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1 2024-11-28T07:21:30,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1, entries=150, sequenceid=333, filesize=12.0 K 2024-11-28T07:21:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d25d6f7e40d245eba818d78aed5e8bbc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc 2024-11-28T07:21:30,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc, entries=150, sequenceid=333, filesize=12.0 K 2024-11-28T07:21:30,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for af0c88dc7f2cd28f9a7271a3bc766683 in 163ms, sequenceid=333, compaction requested=true 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:30,558 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,558 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,558 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:30,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:30,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:21:30,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:21:30,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. because compaction request was cancelled 2024-11-28T07:21:30,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:30,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T07:21:30,560 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:30,560 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:30,560 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:30,561 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f3669ad9cfcc43d89186c0abd6f35f5a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=48.7 K 2024-11-28T07:21:30,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:21:30,561 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3669ad9cfcc43d89186c0abd6f35f5a, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778489275 2024-11-28T07:21:30,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:21:30,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. because compaction request was cancelled 2024-11-28T07:21:30,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:30,562 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4755983048ff4de48c961ce9d3873f27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732778489598 2024-11-28T07:21:30,562 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae0f7f3df2f3480a890a7cc6fabc7aa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732778489960 2024-11-28T07:21:30,563 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0978f99079a648a6991ca008712bb8f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732778490076 2024-11-28T07:21:30,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T07:21:30,579 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:30,579 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/16645ff93de5492cbd613fc99fbcf176 is 50, key is test_row_0/A:col10/1732778490080/Put/seqid=0 2024-11-28T07:21:30,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:30,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T07:21:30,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:30,584 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:21:30,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:30,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:30,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:30,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:30,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/f5c0294fec654fb89cecce8429e5f518 is 50, key is test_row_0/A:col10/1732778490430/Put/seqid=0 2024-11-28T07:21:30,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741908_1084 (size=13051) 2024-11-28T07:21:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741909_1085 (size=12301) 2024-11-28T07:21:30,631 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/16645ff93de5492cbd613fc99fbcf176 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/16645ff93de5492cbd613fc99fbcf176 2024-11-28T07:21:30,632 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/f5c0294fec654fb89cecce8429e5f518 2024-11-28T07:21:30,641 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 16645ff93de5492cbd613fc99fbcf176(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:30,641 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:30,641 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=12, startTime=1732778490558; duration=0sec 2024-11-28T07:21:30,641 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:30,641 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:30,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/563ef0ab3e1c4fa7acd2b9db10edf03e is 50, key is test_row_0/B:col10/1732778490430/Put/seqid=0 2024-11-28T07:21:30,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741910_1086 (size=12301) 2024-11-28T07:21:30,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:30,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:30,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778550862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778550862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:30,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778550864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778550864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:30,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T07:21:31,064 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/563ef0ab3e1c4fa7acd2b9db10edf03e 2024-11-28T07:21:31,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778551066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778551067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778551070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778551073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/406c4df216aa4a25aa90ab95f41abd43 is 50, key is test_row_0/C:col10/1732778490430/Put/seqid=0 2024-11-28T07:21:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741911_1087 (size=12301) 2024-11-28T07:21:31,101 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/406c4df216aa4a25aa90ab95f41abd43 2024-11-28T07:21:31,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/f5c0294fec654fb89cecce8429e5f518 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518 2024-11-28T07:21:31,135 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518, entries=150, sequenceid=357, filesize=12.0 K 2024-11-28T07:21:31,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/563ef0ab3e1c4fa7acd2b9db10edf03e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e 2024-11-28T07:21:31,145 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 
{event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e, entries=150, sequenceid=357, filesize=12.0 K 2024-11-28T07:21:31,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/406c4df216aa4a25aa90ab95f41abd43 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43 2024-11-28T07:21:31,155 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43, entries=150, sequenceid=357, filesize=12.0 K 2024-11-28T07:21:31,157 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for af0c88dc7f2cd28f9a7271a3bc766683 in 572ms, sequenceid=357, compaction requested=true 2024-11-28T07:21:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-28T07:21:31,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-28T07:21:31,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-28T07:21:31,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 883 msec 2024-11-28T07:21:31,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 890 msec 2024-11-28T07:21:31,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:31,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:21:31,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:31,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:31,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T07:21:31,379 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-28T07:21:31,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e49a022a00784a6f9965113d347c3d10 is 50, key is test_row_0/A:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:31,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:31,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-28T07:21:31,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T07:21:31,386 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:31,387 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:31,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:31,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741912_1088 (size=14741) 2024-11-28T07:21:31,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e49a022a00784a6f9965113d347c3d10 2024-11-28T07:21:31,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8dba9b80f0a944c3b60f82dcb5f1424b is 50, key is test_row_0/B:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:31,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778551411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778551419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778551419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778551420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741913_1089 (size=12301) 2024-11-28T07:21:31,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T07:21:31,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778551523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778551527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778551528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778551528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,539 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:31,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T07:21:31,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:31,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:31,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:31,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:31,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T07:21:31,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:31,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T07:21:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:31,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778551726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778551732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778551734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:31,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778551736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:31,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8dba9b80f0a944c3b60f82dcb5f1424b 2024-11-28T07:21:31,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:31,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T07:21:31,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:31,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:31,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:31,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:31,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/fbff2f05ca5743b28b58158f6b3dba64 is 50, key is test_row_0/C:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:31,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741914_1090 (size=12301) 2024-11-28T07:21:31,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/fbff2f05ca5743b28b58158f6b3dba64 2024-11-28T07:21:31,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e49a022a00784a6f9965113d347c3d10 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10 2024-11-28T07:21:31,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10, entries=200, sequenceid=371, filesize=14.4 K 2024-11-28T07:21:31,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8dba9b80f0a944c3b60f82dcb5f1424b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b 2024-11-28T07:21:31,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b, entries=150, sequenceid=371, filesize=12.0 K 2024-11-28T07:21:31,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/fbff2f05ca5743b28b58158f6b3dba64 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64 2024-11-28T07:21:31,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64, entries=150, sequenceid=371, filesize=12.0 K 2024-11-28T07:21:31,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for af0c88dc7f2cd28f9a7271a3bc766683 in 544ms, sequenceid=371, compaction requested=true 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:31,917 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:31,917 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:31,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:31,918 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:31,919 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:31,919 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:31,919 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/16645ff93de5492cbd613fc99fbcf176, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=39.2 K 2024-11-28T07:21:31,919 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:31,919 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:31,919 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:31,919 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d4a191a1af6b403ab3f2693ba5e4b666, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=48.8 K 2024-11-28T07:21:31,920 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d4a191a1af6b403ab3f2693ba5e4b666, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732778489960 2024-11-28T07:21:31,921 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 02328af6c2534521938f2a17d660d9f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732778490076 2024-11-28T07:21:31,921 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 563ef0ab3e1c4fa7acd2b9db10edf03e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732778490428 2024-11-28T07:21:31,922 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8dba9b80f0a944c3b60f82dcb5f1424b, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490755 2024-11-28T07:21:31,924 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16645ff93de5492cbd613fc99fbcf176, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732778490076 2024-11-28T07:21:31,924 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5c0294fec654fb89cecce8429e5f518, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732778490428 2024-11-28T07:21:31,925 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e49a022a00784a6f9965113d347c3d10, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490742 2024-11-28T07:21:31,954 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#76 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:31,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/a9373a1f63d845269b4438132fb2fa6c is 50, key is test_row_0/B:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:31,957 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#77 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:31,958 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/56d74c5b7cfa48de886b92d3001fe3a0 is 50, key is test_row_0/A:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:31,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741915_1091 (size=13187) 2024-11-28T07:21:31,989 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/a9373a1f63d845269b4438132fb2fa6c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/a9373a1f63d845269b4438132fb2fa6c 2024-11-28T07:21:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T07:21:31,999 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into a9373a1f63d845269b4438132fb2fa6c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
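The ExploringCompactionPolicy lines above ("selected 3 files of size 40093 ... with 1 in ratio", "selected 4 files of size 49954 ... with 3 in ratio") refer to its ratio check on candidate permutations of store files. The following is a rough, stand-alone sketch of that check, not the HBase implementation itself; the 1.2 constant mirrors the usual hbase.hstore.compaction.ratio default, and the file sizes are only loosely modelled on the B-store files listed above.

import java.util.List;

public class CompactionRatioSketch {
    // A permutation of store files is "in ratio" if no single file exceeds
    // ratio * (combined size of the other files in the permutation).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // one file would dominate the rewrite, reject this permutation
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes loosely modelled on the four B-store files above (~12.7 K + 3 x ~12.0 K).
        List<Long> candidate = List.of(13_000L, 12_300L, 12_300L, 12_300L);
        System.out.println("in ratio: " + filesInRatio(candidate, 1.2));
    }
}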
2024-11-28T07:21:31,999 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:31,999 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=12, startTime=1732778491917; duration=0sec 2024-11-28T07:21:31,999 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:32,000 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:32,000 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:32,002 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:32,003 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:32,003 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:32,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T07:21:32,003 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:32,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:32,003 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/36a1755b02c941fcb07977e30e7d1c6b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=48.8 K 2024-11-28T07:21:32,003 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:21:32,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:32,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:32,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:32,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:32,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:32,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:32,004 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 36a1755b02c941fcb07977e30e7d1c6b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732778489960 2024-11-28T07:21:32,005 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d25d6f7e40d245eba818d78aed5e8bbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732778490076 2024-11-28T07:21:32,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741916_1092 (size=13153) 2024-11-28T07:21:32,007 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 406c4df216aa4a25aa90ab95f41abd43, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1732778490428 
2024-11-28T07:21:32,008 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fbff2f05ca5743b28b58158f6b3dba64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490755 2024-11-28T07:21:32,013 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/56d74c5b7cfa48de886b92d3001fe3a0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/56d74c5b7cfa48de886b92d3001fe3a0 2024-11-28T07:21:32,020 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 56d74c5b7cfa48de886b92d3001fe3a0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:32,020 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:32,020 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778491917; duration=0sec 2024-11-28T07:21:32,020 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:32,020 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:32,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ea498121ce2f4843bfb3163b8503c087 is 50, key is test_row_0/A:col10/1732778491408/Put/seqid=0 2024-11-28T07:21:32,033 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:32,034 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9711c7dab8cf498baf1e71f697ca227f is 50, key is test_row_0/C:col10/1732778491370/Put/seqid=0 2024-11-28T07:21:32,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
as already flushing 2024-11-28T07:21:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:32,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741917_1093 (size=12301) 2024-11-28T07:21:32,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741918_1094 (size=13153) 2024-11-28T07:21:32,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778552051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778552056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778552056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778552056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778552157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778552162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778552161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778552165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778552361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778552364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778552367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:21:32,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778552369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488
2024-11-28T07:21:32,451 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ea498121ce2f4843bfb3163b8503c087
2024-11-28T07:21:32,462 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9711c7dab8cf498baf1e71f697ca227f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9711c7dab8cf498baf1e71f697ca227f
2024-11-28T07:21:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/62ee64e1b9474149a7fff2c6b54ab479 is 50, key is test_row_0/B:col10/1732778491408/Put/seqid=0
2024-11-28T07:21:32,472 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 9711c7dab8cf498baf1e71f697ca227f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute.
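[editor's note] The "regionserver.HRegionFileSystem(442): Committing <tmp file> as <store file>" record a few lines above shows the commit step that follows every flush and compaction in this log: the new HFile is first written under the region's .tmp/ directory (see the .tmp/A and .tmp/C paths) and only afterwards moved into the column-family directory, so readers never observe a partially written file. The sketch below is not HBase's HRegionFileSystem code; it is a minimal, hypothetical illustration of that write-to-temp-then-commit idea against the plain Hadoop FileSystem API, with invented paths and class name.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Minimal sketch of the "write to .tmp, then commit" pattern seen in the log. */
public class TmpThenCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical layout mirroring the log: <region>/.tmp/<family>/<file> -> <region>/<family>/<file>
    Path regionDir = new Path("/data/default/TestAcidGuarantees/region");
    Path tmpFile = new Path(regionDir, ".tmp/C/newfile");
    Path committed = new Path(regionDir, "C/newfile");

    // 1. Write the new file somewhere readers do not look.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeUTF("flushed or compacted cells would go here");
    }

    // 2. Commit it with a single rename; on HDFS a rename within the
    //    same filesystem either fully succeeds or fully fails.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmpFile, committed)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + committed);
    }
  }
}

Because step 2 is a single metadata operation, the store either contains the complete new file or does not contain it at all, which is why the log reports the flush and compaction results only after the "Committing ... as ..." record.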
2024-11-28T07:21:32,472 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683:
2024-11-28T07:21:32,472 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=12, startTime=1732778491917; duration=0sec
2024-11-28T07:21:32,473 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:21:32,473 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C
2024-11-28T07:21:32,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741919_1095 (size=12301)
2024-11-28T07:21:32,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-11-28T07:21:32,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:21:32,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778552664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488
2024-11-28T07:21:32,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778552667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778552671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778552672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,893 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/62ee64e1b9474149a7fff2c6b54ab479 2024-11-28T07:21:32,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/6cae9ce366184434976d84a3d74f97f1 is 50, key is test_row_0/C:col10/1732778491408/Put/seqid=0 2024-11-28T07:21:32,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741920_1096 (size=12301) 2024-11-28T07:21:32,955 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/6cae9ce366184434976d84a3d74f97f1 2024-11-28T07:21:32,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/ea498121ce2f4843bfb3163b8503c087 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087 2024-11-28T07:21:32,975 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087, entries=150, sequenceid=395, filesize=12.0 K 2024-11-28T07:21:32,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/62ee64e1b9474149a7fff2c6b54ab479 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479 2024-11-28T07:21:32,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:32,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50356 deadline: 1732778552983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:32,985 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650)
at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562)
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-28T07:21:32,989 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479, entries=150, sequenceid=395, filesize=12.0 K
2024-11-28T07:21:32,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/6cae9ce366184434976d84a3d74f97f1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1
2024-11-28T07:21:32,999 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1, entries=150, sequenceid=395, filesize=12.0 K
2024-11-28T07:21:33,001 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for af0c88dc7f2cd28f9a7271a3bc766683 in 998ms, sequenceid=395, compaction requested=false
2024-11-28T07:21:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683:
2024-11-28T07:21:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.
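[editor's note] The client.RpcRetryingCallerImpl(129) record above ("Call exception, tries=7, retries=16, started=8193 ms ago") shows the test's writer threads (AcidGuaranteesTestTool$AtomicityWriter in the client-side trace) retrying their puts while the region keeps rejecting mutations with RegionTooBusyException until the flush just logged here frees memstore space. How patiently a client rides out such a busy region is governed by standard client settings such as hbase.client.retries.number and hbase.client.pause. The snippet below is a minimal, hypothetical client sketch, not part of the test tool itself; the table, row, and column names are copied from the log, the values and the class name BusyRegionWriterSketch are invented.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical writer that relies on the client's built-in retry and backoff. */
public class BusyRegionWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Retry each operation up to 16 times, matching the retries=16 seen in the log.
    conf.setInt("hbase.client.retries.number", 16);
    // Base pause between retries, in milliseconds; the caller backs off from this value.
    conf.setLong("hbase.client.pause", 100);
    // Give up on the whole operation after two minutes.
    conf.setLong("hbase.client.operation.timeout", 120_000);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is over its blocking memstore limit, the server rejects the put
      // with RegionTooBusyException and the retrying caller waits and tries again,
      // which is exactly what the "Call exception, tries=..." record reports.
      table.put(put);
    }
  }
}

Once the flush recorded just above completes and memstore usage drops back under the 512.0 K limit, a later retry of the same put succeeds without any change on the client side.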
2024-11-28T07:21:33,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27
2024-11-28T07:21:33,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=27
2024-11-28T07:21:33,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26
2024-11-28T07:21:33,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6160 sec
2024-11-28T07:21:33,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.6230 sec
2024-11-28T07:21:33,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683
2024-11-28T07:21:33,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C
2024-11-28T07:21:33,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:21:33,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3e1aa9e28dc747779c824eccd0f02a22 is 50, key is test_row_0/A:col10/1732778493167/Put/seqid=0
2024-11-28T07:21:33,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778553193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778553195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778553196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778553197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741921_1097 (size=14741) 2024-11-28T07:21:33,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778553298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778553299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778553301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778553302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T07:21:33,492 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-28T07:21:33,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:33,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-28T07:21:33,503 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:33,505 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:33,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:33,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778553504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778553504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T07:21:33,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778553506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:21:33,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778553506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488
2024-11-28T07:21:33,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28
2024-11-28T07:21:33,615 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3e1aa9e28dc747779c824eccd0f02a22
2024-11-28T07:21:33,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/be84293200694e9b8bc5032187b940e9 is 50, key is test_row_0/B:col10/1732778493167/Put/seqid=0
2024-11-28T07:21:33,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741922_1098 (size=12301)
2024-11-28T07:21:33,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/be84293200694e9b8bc5032187b940e9
2024-11-28T07:21:33,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:21:33,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29
2024-11-28T07:21:33,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.
2024-11-28T07:21:33,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing
2024-11-28T07:21:33,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.
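[editor's note] The records above show a second table flush being scheduled (FlushTableProcedure pid=28 fanning out FlushRegionProcedure pid=29) while MemStoreFlusher.0 is still writing out the previous snapshot, so the region answers "NOT flushing ... as already flushing". From the client's point of view the whole sequence starts with a table flush request like the one Thread-159 issued for procId 26 (see the HBaseAdmin$TableFuture record above). A minimal, hypothetical way to issue such a request through the public client API is sketched below; this is not the test tool's own code, and the class name FlushTableSketch is invented. As the records that follow show, the pid=29 callable declines to flush and reports java.io.IOException: Unable to complete flush back to the master.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Hypothetical client-side flush request, mirroring the FLUSH operation in the log. */
public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; in this log that request becomes a
      // FlushTableProcedure with one FlushRegionProcedure per region (pid=28 -> pid=29).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}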
2024-11-28T07:21:33,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:33,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:33,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:33,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9084e829a0354f23b374b61758900d09 is 50, key is test_row_0/C:col10/1732778493167/Put/seqid=0 2024-11-28T07:21:33,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741923_1099 (size=12301) 2024-11-28T07:21:33,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=413 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9084e829a0354f23b374b61758900d09 2024-11-28T07:21:33,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/3e1aa9e28dc747779c824eccd0f02a22 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22 2024-11-28T07:21:33,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22, entries=200, sequenceid=413, filesize=14.4 K 2024-11-28T07:21:33,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/be84293200694e9b8bc5032187b940e9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9 2024-11-28T07:21:33,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9, entries=150, sequenceid=413, filesize=12.0 K 2024-11-28T07:21:33,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9084e829a0354f23b374b61758900d09 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09 2024-11-28T07:21:33,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09, entries=150, sequenceid=413, filesize=12.0 K 2024-11-28T07:21:33,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for af0c88dc7f2cd28f9a7271a3bc766683 in 582ms, sequenceid=413, compaction requested=true 2024-11-28T07:21:33,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:33,752 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:33,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:33,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:33,753 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:33,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:33,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:33,754 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:33,754 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/56d74c5b7cfa48de886b92d3001fe3a0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=39.3 K 2024-11-28T07:21:33,755 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56d74c5b7cfa48de886b92d3001fe3a0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490755 2024-11-28T07:21:33,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:33,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:33,755 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:33,755 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/a9373a1f63d845269b4438132fb2fa6c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.9 K 2024-11-28T07:21:33,756 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea498121ce2f4843bfb3163b8503c087, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778491408 2024-11-28T07:21:33,756 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a9373a1f63d845269b4438132fb2fa6c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490755 2024-11-28T07:21:33,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:33,756 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e1aa9e28dc747779c824eccd0f02a22, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:33,757 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 62ee64e1b9474149a7fff2c6b54ab479, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778491408 2024-11-28T07:21:33,758 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting be84293200694e9b8bc5032187b940e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:33,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:33,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:33,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:33,769 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:33,769 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#86 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:33,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d84b23776cab4aa39839b7ced87ef3d8 is 50, key is test_row_0/B:col10/1732778493167/Put/seqid=0 2024-11-28T07:21:33,770 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/b042921da0664a938f49dd45b9d979c8 is 50, key is test_row_0/A:col10/1732778493167/Put/seqid=0 2024-11-28T07:21:33,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741924_1100 (size=13289) 2024-11-28T07:21:33,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741925_1101 (size=13255) 2024-11-28T07:21:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T07:21:33,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:33,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:33,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:33,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:33,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T07:21:33,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:33,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:33,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:33,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:33,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:33,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:33,816 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/b042921da0664a938f49dd45b9d979c8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b042921da0664a938f49dd45b9d979c8 2024-11-28T07:21:33,821 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/c808903417074cf1ac6e2d4b64f1b32e is 50, key is test_row_0/A:col10/1732778493194/Put/seqid=0 2024-11-28T07:21:33,829 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into b042921da0664a938f49dd45b9d979c8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:33,830 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:33,830 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778493752; duration=0sec 2024-11-28T07:21:33,830 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:33,830 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:33,830 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:33,832 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:33,833 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:33,833 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:33,833 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9711c7dab8cf498baf1e71f697ca227f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=36.9 K 2024-11-28T07:21:33,833 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9711c7dab8cf498baf1e71f697ca227f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732778490755 2024-11-28T07:21:33,834 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cae9ce366184434976d84a3d74f97f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778491408 2024-11-28T07:21:33,835 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9084e829a0354f23b374b61758900d09, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:33,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741926_1102 (size=14741) 2024-11-28T07:21:33,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/c808903417074cf1ac6e2d4b64f1b32e 2024-11-28T07:21:33,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778553835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778553837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778553840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ffb6504fb81049898e0eb88e95f6f016 is 50, key is test_row_0/B:col10/1732778493194/Put/seqid=0 2024-11-28T07:21:33,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778553837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,851 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:33,852 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9678aca61bf54f85b3709f66036d230d is 50, key is test_row_0/C:col10/1732778493167/Put/seqid=0 2024-11-28T07:21:33,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741927_1103 (size=12301) 2024-11-28T07:21:33,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ffb6504fb81049898e0eb88e95f6f016 2024-11-28T07:21:33,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d7617a6ef323423e92f0960e6d7cc5c7 is 50, key is test_row_0/C:col10/1732778493194/Put/seqid=0 2024-11-28T07:21:33,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741928_1104 (size=13255) 2024-11-28T07:21:33,937 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/9678aca61bf54f85b3709f66036d230d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9678aca61bf54f85b3709f66036d230d 2024-11-28T07:21:33,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778553942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778553945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778553947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,950 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 9678aca61bf54f85b3709f66036d230d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:33,950 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:33,951 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778493758; duration=0sec 2024-11-28T07:21:33,951 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:33,952 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:33,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741929_1105 (size=12301) 2024-11-28T07:21:33,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778553949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:33,967 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:33,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T07:21:33,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:33,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:33,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:33,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:33,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:33,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T07:21:34,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:34,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T07:21:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:34,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:34,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778554146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778554149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778554150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778554156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,209 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d84b23776cab4aa39839b7ced87ef3d8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d84b23776cab4aa39839b7ced87ef3d8 2024-11-28T07:21:34,218 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into d84b23776cab4aa39839b7ced87ef3d8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
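[Editor's note] The repeated RegionTooBusyException / "Over memstore limit=512.0 K" rejections above are returned to writers while the region's memstore sits above its blocking threshold and the flush is still in progress. A minimal client-side sketch of backing off and retrying such a write is shown below; the table, row, family, and qualifier names are taken from the log, while the retry policy (5 attempts, exponential backoff) and the direct catch of RegionTooBusyException are illustrative assumptions, not what the test itself does.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Hypothetical writer that backs off when the region reports it is too busy
 * (memstore above its blocking limit), mirroring the rejected Mutate calls in
 * the log above. Table/row/family/qualifier names come from the log; the
 * retry policy is an assumption for illustration only.
 */
public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // server rejects this with RegionTooBusyException while blocked
          break;            // write accepted once the flush has drained the memstore
        } catch (RegionTooBusyException busy) {
          // Depending on client retry settings the exception may instead surface
          // wrapped (e.g. after the client's own internal retries are exhausted).
          if (attempt == 5) {
            throw busy;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff between attempts
        }
      }
    }
  }
}
```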
2024-11-28T07:21:34,218 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:34,219 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778493753; duration=0sec 2024-11-28T07:21:34,219 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:34,219 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:34,275 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:34,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T07:21:34,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:34,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:34,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:34,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:34,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:34,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d7617a6ef323423e92f0960e6d7cc5c7 2024-11-28T07:21:34,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/c808903417074cf1ac6e2d4b64f1b32e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e 2024-11-28T07:21:34,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e, entries=200, sequenceid=434, filesize=14.4 K 2024-11-28T07:21:34,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/ffb6504fb81049898e0eb88e95f6f016 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016 2024-11-28T07:21:34,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016, entries=150, sequenceid=434, filesize=12.0 K 2024-11-28T07:21:34,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d7617a6ef323423e92f0960e6d7cc5c7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7 2024-11-28T07:21:34,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7, entries=150, sequenceid=434, filesize=12.0 K 2024-11-28T07:21:34,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for af0c88dc7f2cd28f9a7271a3bc766683 in 588ms, sequenceid=434, compaction requested=false 2024-11-28T07:21:34,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:34,428 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
592d8b721726,33143,1732778474488 2024-11-28T07:21:34,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T07:21:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:34,429 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:21:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:34,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:34,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:34,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:34,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e545d44915274624a24721a5d23eee20 is 50, key is test_row_0/A:col10/1732778493838/Put/seqid=0 2024-11-28T07:21:34,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741930_1106 (size=12301) 2024-11-28T07:21:34,442 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e545d44915274624a24721a5d23eee20 2024-11-28T07:21:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:34,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
as already flushing 2024-11-28T07:21:34,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/3e16fe36d8e4466fac6e895126f9bc7f is 50, key is test_row_0/B:col10/1732778493838/Put/seqid=0 2024-11-28T07:21:34,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741931_1107 (size=12301) 2024-11-28T07:21:34,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778554470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778554470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778554471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778554472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778554577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778554577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778554578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778554580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T07:21:34,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778554780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778554780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778554780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:34,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778554783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:34,859 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/3e16fe36d8e4466fac6e895126f9bc7f 2024-11-28T07:21:34,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/1522e4ed5d4e4cf6a99546db5afedb71 is 50, key is test_row_0/C:col10/1732778493838/Put/seqid=0 2024-11-28T07:21:34,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741932_1108 (size=12301) 2024-11-28T07:21:34,898 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/1522e4ed5d4e4cf6a99546db5afedb71 2024-11-28T07:21:34,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e545d44915274624a24721a5d23eee20 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20 2024-11-28T07:21:34,912 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T07:21:34,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/3e16fe36d8e4466fac6e895126f9bc7f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f 2024-11-28T07:21:34,920 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T07:21:34,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/1522e4ed5d4e4cf6a99546db5afedb71 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71 2024-11-28T07:21:34,928 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T07:21:34,929 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for af0c88dc7f2cd28f9a7271a3bc766683 in 500ms, sequenceid=452, compaction requested=true 2024-11-28T07:21:34,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:34,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
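[Editor's note] The entries above show the region flush completing: the .tmp store files for A, B, and C are committed, the region reports "Finished flush of dataSize ~80.51 KB", and the result is reported back to the master as pid=29 (subprocedure of the FlushTableProcedure, pid=28, seen just below). A hedged sketch of how such a table flush could be requested from a client is given here; whether the test drives it exactly this way is not shown in the log, and whether Admin.flush waits for completion depends on the HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Hypothetical client-side trigger for a table flush. The master turns such a
 * request into the remote procedures visible in this log (FlushTableProcedure
 * pid=28 dispatching FlushRegionCallable as pid=29 to the region server).
 */
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to flush every region of the table; completion shows up
      // in the region server log as "Finished flush of dataSize ...".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```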
2024-11-28T07:21:34,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-28T07:21:34,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-28T07:21:34,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-28T07:21:34,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4280 sec 2024-11-28T07:21:34,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.4430 sec 2024-11-28T07:21:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:35,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:21:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:35,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:35,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e4d52d8331be48bdaf4db66e4c5b5b22 is 50, key is test_row_0/A:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778555103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741933_1109 (size=14741) 2024-11-28T07:21:35,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e4d52d8331be48bdaf4db66e4c5b5b22 2024-11-28T07:21:35,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/88622588f9ce470a9b43775c3d72b594 is 50, key is test_row_0/B:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741934_1110 (size=12301) 2024-11-28T07:21:35,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/88622588f9ce470a9b43775c3d72b594 2024-11-28T07:21:35,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/35fd10f1b3774aefb915e6613c5e3639 is 50, key is test_row_0/C:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741935_1111 (size=12301) 2024-11-28T07:21:35,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/35fd10f1b3774aefb915e6613c5e3639 2024-11-28T07:21:35,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e4d52d8331be48bdaf4db66e4c5b5b22 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22 2024-11-28T07:21:35,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22, entries=200, sequenceid=474, filesize=14.4 K 2024-11-28T07:21:35,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/88622588f9ce470a9b43775c3d72b594 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594 2024-11-28T07:21:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778555208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T07:21:35,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/35fd10f1b3774aefb915e6613c5e3639 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639 2024-11-28T07:21:35,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T07:21:35,228 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for af0c88dc7f2cd28f9a7271a3bc766683 in 142ms, sequenceid=474, compaction requested=true 2024-11-28T07:21:35,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:35,229 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:35,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:35,229 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:35,230 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:35,231 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55038 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:35,231 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:35,231 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:35,231 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,231 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
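The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" messages above come from HBase's exploring compaction policy, which scans contiguous windows of eligible store files and prefers windows whose files are all "in ratio", i.e. no file is larger than the configured ratio (default 1.2) times the combined size of the other files in the window. The helper below is a simplified, self-contained illustration of that in-ratio test, not the actual HBase implementation; the file sizes are illustrative values chosen to resemble the four ~12-14 K HFiles selected here.

```java
import java.util.List;

/** Simplified illustration of the "files in ratio" test used when picking a compaction window. */
public final class InRatioCheck {

  /** Returns true if every file is no larger than ratio * (sum of the other files' sizes). */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes in bytes for a four-file window like the one selected above.
    List<Long> window = List.of(13233L, 14741L, 12301L, 14741L);
    System.out.println(filesInRatio(window, 1.2)); // prints true: all four files qualify
  }
}
```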
2024-11-28T07:21:35,231 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d84b23776cab4aa39839b7ced87ef3d8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.0 K 2024-11-28T07:21:35,231 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b042921da0664a938f49dd45b9d979c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=53.7 K 2024-11-28T07:21:35,231 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b042921da0664a938f49dd45b9d979c8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:35,232 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d84b23776cab4aa39839b7ced87ef3d8, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:35,232 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c808903417074cf1ac6e2d4b64f1b32e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732778493193 2024-11-28T07:21:35,232 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ffb6504fb81049898e0eb88e95f6f016, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732778493194 2024-11-28T07:21:35,233 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e545d44915274624a24721a5d23eee20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732778493814 2024-11-28T07:21:35,233 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 
3e16fe36d8e4466fac6e895126f9bc7f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732778493814 2024-11-28T07:21:35,233 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4d52d8331be48bdaf4db66e4c5b5b22, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494467 2024-11-28T07:21:35,234 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 88622588f9ce470a9b43775c3d72b594, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494468 2024-11-28T07:21:35,248 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:35,249 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/dadf023a87a04385b565b96b8bae294f is 50, key is test_row_0/A:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,261 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:35,262 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/436cef79889246f4aa2e37e2b6043372 is 50, key is test_row_0/B:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741936_1112 (size=13391) 2024-11-28T07:21:35,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741937_1113 (size=13425) 2024-11-28T07:21:35,290 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/436cef79889246f4aa2e37e2b6043372 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/436cef79889246f4aa2e37e2b6043372 2024-11-28T07:21:35,297 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 436cef79889246f4aa2e37e2b6043372(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
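The PressureAwareThroughputController line above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") shows compaction I/O being throttled while four small HFiles per store are merged. The sketch below sets a few of the configuration properties that drive this behaviour. hbase.hstore.compaction.min, hbase.hstore.compaction.max and hbase.hstore.compaction.ratio are standard HBase keys; the throughput-bound key is given from memory and should be verified against your HBase release, and the values shown are arbitrary examples rather than the settings used in this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Minimum / maximum number of store files considered for a single minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    // A file stays in a compaction window only if it is <= ratio * (size of the other files).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);

    // Upper bound (bytes/sec) used by the pressure-aware compaction throughput controller;
    // key name is my recollection of the HBase property and should be double-checked.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);

    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}
```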
2024-11-28T07:21:35,297 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:35,297 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=12, startTime=1732778495229; duration=0sec 2024-11-28T07:21:35,298 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:35,298 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:35,298 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:35,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:35,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:35,300 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,300 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9678aca61bf54f85b3709f66036d230d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.0 K 2024-11-28T07:21:35,301 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9678aca61bf54f85b3709f66036d230d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=413, earliestPutTs=1732778492053 2024-11-28T07:21:35,302 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d7617a6ef323423e92f0960e6d7cc5c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1732778493194 2024-11-28T07:21:35,302 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1522e4ed5d4e4cf6a99546db5afedb71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=452, earliestPutTs=1732778493814 2024-11-28T07:21:35,302 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 35fd10f1b3774aefb915e6613c5e3639, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494468 2024-11-28T07:21:35,313 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:35,314 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/1e493b151cda47d8a16e6f216409a94c is 50, key is test_row_0/C:col10/1732778494468/Put/seqid=0 2024-11-28T07:21:35,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741938_1114 (size=13391) 2024-11-28T07:21:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:35,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:21:35,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:35,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/511113fc153144e3b21c93dc5ad303d1 is 50, key is test_row_0/A:col10/1732778495101/Put/seqid=0 2024-11-28T07:21:35,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778555438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741939_1115 (size=12301) 2024-11-28T07:21:35,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/511113fc153144e3b21c93dc5ad303d1 2024-11-28T07:21:35,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8c8ad6165c3445e7adc318000576bc26 is 50, key is test_row_0/B:col10/1732778495101/Put/seqid=0 2024-11-28T07:21:35,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741940_1116 (size=12301) 2024-11-28T07:21:35,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8c8ad6165c3445e7adc318000576bc26 2024-11-28T07:21:35,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d60ed96aa64646998148c7d8d4be7b33 is 50, key is test_row_0/C:col10/1732778495101/Put/seqid=0 2024-11-28T07:21:35,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741941_1117 (size=12301) 2024-11-28T07:21:35,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=490 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d60ed96aa64646998148c7d8d4be7b33 2024-11-28T07:21:35,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/511113fc153144e3b21c93dc5ad303d1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1 2024-11-28T07:21:35,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1, entries=150, sequenceid=490, filesize=12.0 K 2024-11-28T07:21:35,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/8c8ad6165c3445e7adc318000576bc26 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26 2024-11-28T07:21:35,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26, entries=150, sequenceid=490, filesize=12.0 K 2024-11-28T07:21:35,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/d60ed96aa64646998148c7d8d4be7b33 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33 2024-11-28T07:21:35,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778555546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33, entries=150, sequenceid=490, filesize=12.0 K 2024-11-28T07:21:35,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for af0c88dc7f2cd28f9a7271a3bc766683 in 140ms, sequenceid=490, compaction requested=false 2024-11-28T07:21:35,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:35,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T07:21:35,613 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-28T07:21:35,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:35,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-28T07:21:35,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:35,616 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:35,617 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:35,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:35,686 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/dadf023a87a04385b565b96b8bae294f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/dadf023a87a04385b565b96b8bae294f 2024-11-28T07:21:35,693 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into dadf023a87a04385b565b96b8bae294f(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:35,693 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:35,693 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=12, startTime=1732778495228; duration=0sec 2024-11-28T07:21:35,693 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:35,693 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:35,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:35,734 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/1e493b151cda47d8a16e6f216409a94c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1e493b151cda47d8a16e6f216409a94c 2024-11-28T07:21:35,741 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 1e493b151cda47d8a16e6f216409a94c(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:35,741 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:35,741 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=12, startTime=1732778495229; duration=0sec 2024-11-28T07:21:35,741 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:35,742 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:35,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:35,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:35,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d8c6c52dbd1f4fd99baca8f36f825ae9 is 50, key is test_row_0/A:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:35,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778555758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741942_1118 (size=14741) 2024-11-28T07:21:35,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d8c6c52dbd1f4fd99baca8f36f825ae9 2024-11-28T07:21:35,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/86b81261a02843889abd287ee613de87 is 50, key is test_row_0/B:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:35,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:35,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:35,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:35,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:35,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:35,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:35,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741943_1119 (size=12301) 2024-11-28T07:21:35,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778555861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778555863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:35,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778555864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:35,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:35,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:35,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:35,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:35,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:35,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:35,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:35,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778556061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778556065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778556067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778556068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:36,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:36,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:36,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,084 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/86b81261a02843889abd287ee613de87 2024-11-28T07:21:36,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bfd4429387d3473a9937df7ee87fbaba is 50, key is test_row_0/C:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:36,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741944_1120 (size=12301) 2024-11-28T07:21:36,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:36,236 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:36,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:36,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:36,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778556367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778556369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778556373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:36,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:36,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:36,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:36,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:36,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:36,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:36,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778556568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bfd4429387d3473a9937df7ee87fbaba 2024-11-28T07:21:36,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/d8c6c52dbd1f4fd99baca8f36f825ae9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9 2024-11-28T07:21:36,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9, entries=200, sequenceid=516, filesize=14.4 K 2024-11-28T07:21:36,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/86b81261a02843889abd287ee613de87 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87 2024-11-28T07:21:36,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87, entries=150, sequenceid=516, filesize=12.0 K 2024-11-28T07:21:36,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bfd4429387d3473a9937df7ee87fbaba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba 2024-11-28T07:21:36,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba, entries=150, sequenceid=516, filesize=12.0 K 2024-11-28T07:21:36,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for af0c88dc7f2cd28f9a7271a3bc766683 in 893ms, sequenceid=516, compaction requested=true 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:36,639 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:36,639 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:36,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:36,641 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:36,641 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor 
compaction (all files) 2024-11-28T07:21:36,641 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:36,641 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/dadf023a87a04385b565b96b8bae294f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=39.5 K 2024-11-28T07:21:36,641 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:36,642 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:36,642 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
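Aside on the compaction selection logged above: the "Exploring compaction algorithm has selected 3 files" entries reflect HBase's size-ratio test for picking store files for a minor compaction. Below is a minimal, simplified sketch of that ratio check only; it is not the actual ExploringCompactionPolicy code, the 1.2 ratio is the commonly documented default assumed here for illustration, and the byte sizes approximate the 13.1 K / 12.0 K / 14.4 K files named in the log.

    // Editor-added, simplified sketch of the size-ratio rule suggested by the
    // "Exploring compaction algorithm has selected 3 files" entries above.
    // NOT the real ExploringCompactionPolicy; ratio and sizes are assumptions.
    public class RatioCheckSketch {
        static boolean fitsRatio(long[] sizes, double ratio) {
            long total = 0;
            for (long s : sizes) total += s;
            for (long s : sizes) {
                // each file must be no bigger than ratio * (sum of the others)
                if (s > ratio * (total - s)) return false;
            }
            return true;
        }

        public static void main(String[] args) {
            long[] storeFilesA = {13414, 12288, 14745}; // ~13.1 K, ~12.0 K, ~14.4 K
            System.out.println(fitsRatio(storeFilesA, 1.2)); // true
        }
    }

All three files pass the check, which matches the "selected 3 files ... with 1 in ratio" entries for stores A and B.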
2024-11-28T07:21:36,642 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/436cef79889246f4aa2e37e2b6043372, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=37.1 K 2024-11-28T07:21:36,642 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting dadf023a87a04385b565b96b8bae294f, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494468 2024-11-28T07:21:36,642 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 436cef79889246f4aa2e37e2b6043372, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494468 2024-11-28T07:21:36,642 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 511113fc153144e3b21c93dc5ad303d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732778495093 2024-11-28T07:21:36,643 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c8ad6165c3445e7adc318000576bc26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732778495093 2024-11-28T07:21:36,643 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8c6c52dbd1f4fd99baca8f36f825ae9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495428 2024-11-28T07:21:36,643 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 86b81261a02843889abd287ee613de87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495437 2024-11-28T07:21:36,654 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:36,654 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/93553e73b6354c1ebcf4180bc1f6c892 is 50, key is test_row_0/B:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:36,654 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:36,655 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/91a39f4c83564f35abca69ef7231eae0 is 50, key is test_row_0/A:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:36,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741945_1121 (size=13527) 2024-11-28T07:21:36,664 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/93553e73b6354c1ebcf4180bc1f6c892 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93553e73b6354c1ebcf4180bc1f6c892 2024-11-28T07:21:36,670 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 93553e73b6354c1ebcf4180bc1f6c892(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:36,670 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:36,670 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=13, startTime=1732778496639; duration=0sec 2024-11-28T07:21:36,671 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:36,671 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:36,671 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:36,672 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:36,672 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:36,672 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:36,673 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1e493b151cda47d8a16e6f216409a94c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=37.1 K 2024-11-28T07:21:36,673 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e493b151cda47d8a16e6f216409a94c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732778494468 2024-11-28T07:21:36,673 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d60ed96aa64646998148c7d8d4be7b33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732778495093 2024-11-28T07:21:36,674 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bfd4429387d3473a9937df7ee87fbaba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495437 2024-11-28T07:21:36,686 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:36,687 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/4bb0b7b4560b4ca6b99c0099f3c32f8e is 50, key is test_row_0/C:col10/1732778495746/Put/seqid=0 2024-11-28T07:21:36,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741946_1122 (size=13493) 2024-11-28T07:21:36,696 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:36,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T07:21:36,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
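For reference, the remote flush procedure (pid=31) that the master keeps re-dispatching in the entries above performs the same kind of work a client can request for the whole test table through the public Admin API. A minimal sketch follows; the table name is taken from the log, while connection settings, error handling, and the exact waiting semantics of flush() are assumptions of the example rather than details shown in this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Editor-added sketch: client-side request for a table flush, assuming a
    // default client configuration. In the log above the equivalent server-side
    // work is driven by the master's flush procedure rather than a client call.
    public class FlushExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }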
2024-11-28T07:21:36,697 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:36,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:36,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/91a39f4c83564f35abca69ef7231eae0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/91a39f4c83564f35abca69ef7231eae0 2024-11-28T07:21:36,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741947_1123 (size=13493) 2024-11-28T07:21:36,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/32c9faf9116b4c3bad13a42d0a644c4a is 50, key is test_row_0/A:col10/1732778495757/Put/seqid=0 2024-11-28T07:21:36,714 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 91a39f4c83564f35abca69ef7231eae0(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
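Aside on the RegionTooBusyException entries before and after this point: writes to a region are rejected once its memstore exceeds the blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit seen here implies this test uses a far smaller flush size than the 128 MB default; the concrete values in the sketch below are assumptions chosen only to reproduce the 512 K figure, not the settings this run actually used.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Editor-added illustration of how the blocking limit is derived:
    // blocking limit = memstore flush size * block multiplier.
    // The values below are assumptions for illustration only.
    public class MemstoreLimitExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K (assumed)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 is the usual default
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("updates block above " + blocking + " bytes"); // 524288 = 512 K
        }
    }

Clients that hit this limit receive the RegionTooBusyException through the normal retry machinery, which is why the same Mutate calls reappear with new callIds in the entries that follow.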
2024-11-28T07:21:36,714 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:36,714 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=13, startTime=1732778496639; duration=0sec 2024-11-28T07:21:36,715 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:36,715 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:36,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:36,721 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/4bb0b7b4560b4ca6b99c0099f3c32f8e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/4bb0b7b4560b4ca6b99c0099f3c32f8e 2024-11-28T07:21:36,733 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 4bb0b7b4560b4ca6b99c0099f3c32f8e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:36,733 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:36,733 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=13, startTime=1732778496639; duration=0sec 2024-11-28T07:21:36,733 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:36,734 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741948_1124 (size=12301) 2024-11-28T07:21:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:36,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:36,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778556892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778556893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778556894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778556997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:36,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778556997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778556999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,135 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/32c9faf9116b4c3bad13a42d0a644c4a 2024-11-28T07:21:37,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bffc3a15456548aabd5678e718fbd843 is 50, key is test_row_0/B:col10/1732778495757/Put/seqid=0 2024-11-28T07:21:37,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741949_1125 (size=12301) 2024-11-28T07:21:37,168 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bffc3a15456548aabd5678e718fbd843 2024-11-28T07:21:37,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8bd417daab6f492d907a45316bc10bfe is 50, key is test_row_0/C:col10/1732778495757/Put/seqid=0 2024-11-28T07:21:37,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741950_1126 (size=12301) 2024-11-28T07:21:37,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778557200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778557201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778557202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778557502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778557505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778557507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:37,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778557573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:37,599 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8bd417daab6f492d907a45316bc10bfe 2024-11-28T07:21:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/32c9faf9116b4c3bad13a42d0a644c4a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a 2024-11-28T07:21:37,612 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a, entries=150, sequenceid=530, filesize=12.0 K 2024-11-28T07:21:37,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/bffc3a15456548aabd5678e718fbd843 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843 2024-11-28T07:21:37,619 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843, entries=150, sequenceid=530, filesize=12.0 K 2024-11-28T07:21:37,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/8bd417daab6f492d907a45316bc10bfe as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe 2024-11-28T07:21:37,632 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe, entries=150, sequenceid=530, filesize=12.0 K 2024-11-28T07:21:37,634 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for af0c88dc7f2cd28f9a7271a3bc766683 in 937ms, sequenceid=530, compaction requested=false 2024-11-28T07:21:37,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:37,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:37,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-28T07:21:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-28T07:21:37,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-28T07:21:37,638 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0190 sec 2024-11-28T07:21:37,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 2.0250 sec 2024-11-28T07:21:37,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T07:21:37,721 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-28T07:21:37,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:37,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-28T07:21:37,724 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:37,725 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T07:21:37,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:37,725 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T07:21:37,877 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:37,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-28T07:21:37,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:37,878 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:37,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:37,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4350ef7371df46399c61f6ca23d49fc1 is 50, key is test_row_0/A:col10/1732778496892/Put/seqid=0 2024-11-28T07:21:37,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741951_1127 (size=12301) 
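Note on the repeated RegionTooBusyException entries above: a region server rejects writes with "Over memstore limit=512.0 K" once a region's memstore grows past its blocking threshold, which is the per-region flush size multiplied by the block multiplier. The excerpt does not show this test's configuration, so the sketch below only assumes values consistent with the 512 K limit in the messages (hbase-common on the classpath; the 128 K flush size is an assumption, not taken from this log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        // The "Over memstore limit" check trips when a region's memstore exceeds
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed test value: 128 K flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4 x flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // With the assumed values this prints 524288 bytes, i.e. the 512.0 K limit seen in the log.
        System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
    }
}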
2024-11-28T07:21:37,906 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4350ef7371df46399c61f6ca23d49fc1 2024-11-28T07:21:37,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/b751604b5b504d43b8136f9661db46c9 is 50, key is test_row_0/B:col10/1732778496892/Put/seqid=0 2024-11-28T07:21:37,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741952_1128 (size=12301) 2024-11-28T07:21:37,944 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/b751604b5b504d43b8136f9661db46c9 2024-11-28T07:21:37,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0a466356f29f41369ad9b3fec2310610 is 50, key is test_row_0/C:col10/1732778496892/Put/seqid=0 2024-11-28T07:21:37,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741953_1129 (size=12301) 2024-11-28T07:21:38,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:38,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:38,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T07:21:38,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T07:21:38,361 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0a466356f29f41369ad9b3fec2310610 2024-11-28T07:21:38,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4350ef7371df46399c61f6ca23d49fc1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1 2024-11-28T07:21:38,376 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1, entries=150, sequenceid=555, filesize=12.0 K 2024-11-28T07:21:38,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/b751604b5b504d43b8136f9661db46c9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9 2024-11-28T07:21:38,382 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9, entries=150, sequenceid=555, filesize=12.0 K 2024-11-28T07:21:38,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0a466356f29f41369ad9b3fec2310610 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610 2024-11-28T07:21:38,388 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610, entries=150, sequenceid=555, filesize=12.0 K 2024-11-28T07:21:38,389 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for af0c88dc7f2cd28f9a7271a3bc766683 in 511ms, sequenceid=555, compaction requested=true 2024-11-28T07:21:38,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:38,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:38,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-28T07:21:38,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-28T07:21:38,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-28T07:21:38,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 665 msec 2024-11-28T07:21:38,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 670 msec 2024-11-28T07:21:38,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:38,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:38,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:38,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/afc824c3d6b9461ba9245cf137d5b2d1 is 50, key is test_row_0/A:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:38,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741954_1130 (size=17181) 2024-11-28T07:21:38,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T07:21:38,828 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-28T07:21:38,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:38,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-28T07:21:38,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:38,831 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:38,832 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:38,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:38,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=34 2024-11-28T07:21:38,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778558967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778558969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778558970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:38,984 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:38,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:38,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
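The bursts of RegionTooBusyException between the flushes are the expected back-pressure: puts are refused until the in-flight flush drains the memstore. With default settings the HBase client absorbs this by retrying internally; the sketch below is only an application-level illustration of the same backoff idea, assuming the standard client API, a placeholder cell value that is not in the log, and retry settings that let the exception reach the caller.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row and column taken from the log (test_row_0, family A, qualifier col10);
            // the cell value is a placeholder.
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the memstore drops back under the blocking limit
                } catch (RegionTooBusyException busy) {
                    // Region is above its memstore blocking limit; give the flush time to complete.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}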
2024-11-28T07:21:38,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:38,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:38,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:38,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
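The RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K in this run), and the remote flush procedure (pid=35) keeps failing with "Unable to complete flush" because the region is already flushing. As a rough, non-authoritative sketch of what a caller sees, the Java snippet below wraps a single put in an explicit backoff loop; the retry count and pause are made up, the row/family/qualifier are copied from the keys in the log (test_row_0/A:col10), and in practice the HBase client normally retries retriable exceptions like this on its own before they ever surface.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {

        // Retry a single put while the region reports it is over its memstore blocking limit.
        // maxAttempts and basePauseMs are illustrative values, not taken from the log.
        static void putWithBackoff(Table table, Put put, int maxAttempts, long basePauseMs)
                throws IOException, InterruptedException {
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e; // still blocked after maxAttempts tries; give up
                    }
                    // Exponential backoff: give the pending flush time to drain the memstore.
                    Thread.sleep(basePauseMs * (1L << (attempt - 1)));
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row, family, and qualifier mirror the keys seen in the log (test_row_0/A:col10).
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 5, 200L);
            }
        }
    }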
2024-11-28T07:21:39,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/afc824c3d6b9461ba9245cf137d5b2d1 2024-11-28T07:21:39,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/476498d2518e4beb8e162bf023948b67 is 50, key is test_row_0/B:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:39,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741955_1131 (size=12301) 2024-11-28T07:21:39,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/476498d2518e4beb8e162bf023948b67 2024-11-28T07:21:39,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bbc46fe232944492bff7f334c32aec9f is 50, key is test_row_0/C:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:39,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741956_1132 (size=12301) 2024-11-28T07:21:39,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:39,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:39,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:39,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:39,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778559271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778559274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778559275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:39,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:39,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:39,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:39,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:39,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:39,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=570 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bbc46fe232944492bff7f334c32aec9f 2024-11-28T07:21:39,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/afc824c3d6b9461ba9245cf137d5b2d1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1 2024-11-28T07:21:39,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1, entries=250, sequenceid=570, filesize=16.8 K 2024-11-28T07:21:39,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/476498d2518e4beb8e162bf023948b67 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67 2024-11-28T07:21:39,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67, entries=150, sequenceid=570, filesize=12.0 K 2024-11-28T07:21:39,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bbc46fe232944492bff7f334c32aec9f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f 2024-11-28T07:21:39,556 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f, entries=150, sequenceid=570, filesize=12.0 K 2024-11-28T07:21:39,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for af0c88dc7f2cd28f9a7271a3bc766683 in 930ms, sequenceid=570, compaction requested=true 2024-11-28T07:21:39,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:39,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:39,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:39,557 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:39,557 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:39,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:39,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:39,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:39,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:39,559 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55276 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:39,559 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:39,559 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
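The SortedCompactionPolicy and ExploringCompactionPolicy entries above show the store picking 4 eligible HFiles for the minor compaction of af0c88dc7f2cd28f9a7271a3bc766683/A after weighing "3 permutations with 3 in ratio". The sketch below is only a toy version of that size-ratio selection, not the real policy (which also handles off-peak ratios, file-count limits, and other constraints): among contiguous windows of files, keep only those in which every file is at most ratio times the combined size of the other files in the window, then prefer the window with the most files, breaking ties by the smaller total size. The file sizes, ratio, and bounds in main are illustrative.

    import java.util.List;

    public class RatioSelectionSketch {

        /**
         * Pick a contiguous window of store-file sizes to compact.
         * A window is "in ratio" when every file in it is <= ratio * (sum of the other files),
         * which is roughly the criterion behind the "N in ratio" message in the log.
         * Returns {startIndex, endIndexExclusive}, or null if nothing qualifies.
         */
        static int[] selectWindow(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
            int[] best = null;
            long bestSum = Long.MAX_VALUE;
            int bestCount = 0;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    long sum = 0;
                    for (int i = start; i < end; i++) sum += sizes.get(i);
                    boolean inRatio = true;
                    for (int i = start; i < end; i++) {
                        if (sizes.get(i) > ratio * (sum - sizes.get(i))) { inRatio = false; break; }
                    }
                    if (!inRatio) continue;
                    int count = end - start;
                    // Prefer more files; break ties by the smaller total size.
                    if (count > bestCount || (count == bestCount && sum < bestSum)) {
                        best = new int[] { start, end };
                        bestCount = count;
                        bestSum = sum;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Sizes loosely based on the A-store files in the log (13.2 K, 12.0 K, 12.0 K, 16.8 K).
            List<Long> sizes = List.of(13_517L, 12_288L, 12_288L, 17_203L);
            int[] window = selectWindow(sizes, 3, 10, 1.2);
            System.out.println(window == null ? "no compaction" : "compact [" + window[0] + "," + window[1] + ")");
        }
    }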
2024-11-28T07:21:39,559 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/91a39f4c83564f35abca69ef7231eae0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=54.0 K 2024-11-28T07:21:39,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50430 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:39,559 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:39,559 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,560 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93553e73b6354c1ebcf4180bc1f6c892, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.2 K 2024-11-28T07:21:39,560 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91a39f4c83564f35abca69ef7231eae0, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495437 2024-11-28T07:21:39,560 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 93553e73b6354c1ebcf4180bc1f6c892, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495437 2024-11-28T07:21:39,561 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32c9faf9116b4c3bad13a42d0a644c4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, 
earliestPutTs=1732778495755 2024-11-28T07:21:39,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bffc3a15456548aabd5678e718fbd843, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732778495755 2024-11-28T07:21:39,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b751604b5b504d43b8136f9661db46c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732778496885 2024-11-28T07:21:39,561 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4350ef7371df46399c61f6ca23d49fc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732778496885 2024-11-28T07:21:39,562 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 476498d2518e4beb8e162bf023948b67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498016 2024-11-28T07:21:39,562 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting afc824c3d6b9461ba9245cf137d5b2d1, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498014 2024-11-28T07:21:39,574 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:39,575 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#119 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:39,575 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/1693e15515a74b1bbb6af4a90f6a8ce9 is 50, key is test_row_0/B:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:39,575 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4c5b3144b8b84bbc9103e498b7b705c1 is 50, key is test_row_0/A:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:39,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:21:39,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741957_1133 (size=13629) 2024-11-28T07:21:39,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:39,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:39,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:39,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:39,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:39,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:39,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/cfdd31e165eb4d55a9f8371f93a0c10f is 50, key is test_row_0/A:col10/1732778498658/Put/seqid=0 2024-11-28T07:21:39,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741958_1134 (size=13663) 2024-11-28T07:21:39,615 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/1693e15515a74b1bbb6af4a90f6a8ce9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/1693e15515a74b1bbb6af4a90f6a8ce9 2024-11-28T07:21:39,625 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into 1693e15515a74b1bbb6af4a90f6a8ce9(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
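The PressureAwareThroughputController entries above report each compaction's average throughput against a shared limit (50.00 MB/second here) and how long the writer slept to stay under it. The fixed-rate throttle below is only a minimal sketch of that idea, assuming a constant byte-per-second budget; the real controller adjusts its limit dynamically with store pressure, which is not modeled here.

    /** Minimal fixed-rate throttle: sleep so that observed throughput stays under a byte/sec limit. */
    public class SimpleThroughputThrottle {
        private final double maxBytesPerSecond;
        private long totalBytes;
        private final long startNanos = System.nanoTime();

        SimpleThroughputThrottle(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
        }

        /** Record bytes written and sleep if we are running ahead of the allowed rate. */
        synchronized void control(long bytesWritten) throws InterruptedException {
            totalBytes += bytesWritten;
            double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
            double minimumSeconds = totalBytes / maxBytesPerSecond; // time this many bytes "should" take
            long sleepMs = (long) ((minimumSeconds - elapsedSeconds) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            SimpleThroughputThrottle throttle = new SimpleThroughputThrottle(50.0 * 1024 * 1024); // 50 MB/s
            byte[] block = new byte[64 * 1024];
            for (int i = 0; i < 1000; i++) {
                // Pretend to write a 64 KB block, then account for it against the budget.
                throttle.control(block.length);
            }
            System.out.println("done");
        }
    }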
2024-11-28T07:21:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741959_1135 (size=14741) 2024-11-28T07:21:39,625 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:39,625 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=12, startTime=1732778499557; duration=0sec 2024-11-28T07:21:39,625 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:39,625 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:39,626 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:39,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=593 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/cfdd31e165eb4d55a9f8371f93a0c10f 2024-11-28T07:21:39,629 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:39,629 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:39,629 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:39,630 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/4bb0b7b4560b4ca6b99c0099f3c32f8e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.2 K 2024-11-28T07:21:39,630 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bb0b7b4560b4ca6b99c0099f3c32f8e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1732778495437 2024-11-28T07:21:39,631 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bd417daab6f492d907a45316bc10bfe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732778495755 2024-11-28T07:21:39,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778559631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,637 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a466356f29f41369ad9b3fec2310610, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=555, earliestPutTs=1732778496885 2024-11-28T07:21:39,638 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bbc46fe232944492bff7f334c32aec9f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498016 2024-11-28T07:21:39,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c49b7b94dc1a46f884e151be6162a689 is 50, key is test_row_0/B:col10/1732778498658/Put/seqid=0 2024-11-28T07:21:39,654 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#122 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:39,655 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bba21b5924c444f59e43cf42c49562b1 is 50, key is test_row_0/C:col10/1732778498016/Put/seqid=0 2024-11-28T07:21:39,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741960_1136 (size=12301) 2024-11-28T07:21:39,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=593 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c49b7b94dc1a46f884e151be6162a689 2024-11-28T07:21:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741961_1137 (size=13629) 2024-11-28T07:21:39,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ad45ad41ca064ba489a3971ac4c7123e is 50, key is test_row_0/C:col10/1732778498658/Put/seqid=0 2024-11-28T07:21:39,703 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/bba21b5924c444f59e43cf42c49562b1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bba21b5924c444f59e43cf42c49562b1 2024-11-28T07:21:39,710 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into bba21b5924c444f59e43cf42c49562b1(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:39,710 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:39,710 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=12, startTime=1732778499558; duration=0sec 2024-11-28T07:21:39,710 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:39,710 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:39,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741962_1138 (size=12301) 2024-11-28T07:21:39,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=593 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ad45ad41ca064ba489a3971ac4c7123e 2024-11-28T07:21:39,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/cfdd31e165eb4d55a9f8371f93a0c10f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f 2024-11-28T07:21:39,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f, entries=200, sequenceid=593, filesize=14.4 K 2024-11-28T07:21:39,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/c49b7b94dc1a46f884e151be6162a689 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689 2024-11-28T07:21:39,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689, entries=150, sequenceid=593, filesize=12.0 K 2024-11-28T07:21:39,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/ad45ad41ca064ba489a3971ac4c7123e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e 2024-11-28T07:21:39,739 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778559737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e, entries=150, sequenceid=593, filesize=12.0 K 2024-11-28T07:21:39,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for af0c88dc7f2cd28f9a7271a3bc766683 in 161ms, sequenceid=593, compaction requested=false 2024-11-28T07:21:39,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:39,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:39,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T07:21:39,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:39,750 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:39,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:39,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e9017d49725e46b4a0caaa1c6ead23de is 50, key is test_row_0/A:col10/1732778499614/Put/seqid=0 2024-11-28T07:21:39,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:39,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. as already flushing 2024-11-28T07:21:39,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741963_1139 (size=12301) 2024-11-28T07:21:39,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778559811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778559812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778559815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778559917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778559917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778559918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:39,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:39,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778559941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:39,997 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/4c5b3144b8b84bbc9103e498b7b705c1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4c5b3144b8b84bbc9103e498b7b705c1 2024-11-28T07:21:40,003 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into 4c5b3144b8b84bbc9103e498b7b705c1(size=13.3 K), total size for store is 27.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:40,003 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:40,003 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=12, startTime=1732778499557; duration=0sec 2024-11-28T07:21:40,004 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:40,004 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:40,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778560122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778560122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778560122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,187 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e9017d49725e46b4a0caaa1c6ead23de 2024-11-28T07:21:40,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 is 50, key is test_row_0/B:col10/1732778499614/Put/seqid=0 2024-11-28T07:21:40,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741964_1140 (size=12301) 2024-11-28T07:21:40,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778560247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778560425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778560426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778560427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,566 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:56318 2024-11-28T07:21:40,566 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:40,566 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:56318 2024-11-28T07:21:40,566 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:56318 2024-11-28T07:21:40,566 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:40,566 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:40,567 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:56318 2024-11-28T07:21:40,567 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:40,601 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 2024-11-28T07:21:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa4aff3afbff43db80fc15e12759c0b3 is 50, key is test_row_0/C:col10/1732778499614/Put/seqid=0 2024-11-28T07:21:40,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741965_1141 (size=12301) 2024-11-28T07:21:40,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50384 deadline: 1732778560753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50398 deadline: 1732778560929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50368 deadline: 1732778560931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:40,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50406 deadline: 1732778560932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:40,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:41,015 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa4aff3afbff43db80fc15e12759c0b3 2024-11-28T07:21:41,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/e9017d49725e46b4a0caaa1c6ead23de as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de 2024-11-28T07:21:41,025 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de, entries=150, sequenceid=608, filesize=12.0 K 2024-11-28T07:21:41,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 2024-11-28T07:21:41,031 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82, entries=150, sequenceid=608, filesize=12.0 K 2024-11-28T07:21:41,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/aa4aff3afbff43db80fc15e12759c0b3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3 2024-11-28T07:21:41,036 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3, entries=150, sequenceid=608, filesize=12.0 K 2024-11-28T07:21:41,037 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for af0c88dc7f2cd28f9a7271a3bc766683 in 1287ms, sequenceid=608, compaction requested=true 2024-11-28T07:21:41,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:41,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:41,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-28T07:21:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=35 2024-11-28T07:21:41,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-28T07:21:41,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2070 sec 2024-11-28T07:21:41,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 2.2110 sec 2024-11-28T07:21:41,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:41,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:41,763 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:56318 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:41,763 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, 
store=B 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:41,763 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:41,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/eb7eab746864441ab65fab6d6d4068c0 is 50, key is test_row_0/A:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:41,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741966_1142 (size=12301) 2024-11-28T07:21:41,938 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:56318 2024-11-28T07:21:41,938 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:41,939 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:56318 2024-11-28T07:21:41,939 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:41,941 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:56318 2024-11-28T07:21:41,941 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:21:42,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=633 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/eb7eab746864441ab65fab6d6d4068c0 2024-11-28T07:21:42,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e1b4e70b135b426bbb0141094b6ee50e is 50, key is test_row_0/B:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:42,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741967_1143 (size=12301) 2024-11-28T07:21:42,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=633 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e1b4e70b135b426bbb0141094b6ee50e 2024-11-28T07:21:42,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/78373a46dcf046fabfe163fa252340e7 is 50, key is test_row_0/C:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:42,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741968_1144 (size=12301) 2024-11-28T07:21:42,846 DEBUG [FsDatasetAsyncDiskServiceFixer {}] 
hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T07:21:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T07:21:42,936 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-28T07:21:43,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=633 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/78373a46dcf046fabfe163fa252340e7 2024-11-28T07:21:43,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/eb7eab746864441ab65fab6d6d4068c0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0 2024-11-28T07:21:43,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0, entries=150, sequenceid=633, filesize=12.0 K 2024-11-28T07:21:43,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/e1b4e70b135b426bbb0141094b6ee50e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e 2024-11-28T07:21:43,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e, entries=150, sequenceid=633, filesize=12.0 K 2024-11-28T07:21:43,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/78373a46dcf046fabfe163fa252340e7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7 2024-11-28T07:21:43,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7, entries=150, sequenceid=633, filesize=12.0 K 2024-11-28T07:21:43,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=20.13 KB/20610 for af0c88dc7f2cd28f9a7271a3bc766683 in 1258ms, sequenceid=633, compaction requested=true 
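Aside (illustrative sketch, not part of this log): the entry "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed" above is the client side of the FlushTableProcedure (pid=34) that the master just finished. A flush like this can be requested through the standard HBase 2.x Admin API; the quorum value below is a placeholder, since the test itself runs an in-process mini cluster.

// Illustrative sketch only -- not code from this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder connection setting
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush of every region of the table; in the build logged above this
      // surfaces as FlushTableProcedure pid=34 with a FlushRegionProcedure child (pid=35).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}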
2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store af0c88dc7f2cd28f9a7271a3bc766683:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:43,021 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:43,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:43,021 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:43,022 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52972 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:43,022 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50566 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:43,022 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/A is initiating minor compaction (all files) 2024-11-28T07:21:43,022 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/B is initiating minor compaction (all files) 2024-11-28T07:21:43,022 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/A in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:43,022 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/B in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
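Aside (sketch): the selection messages above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", ExploringCompactionPolicy picking 4 files) are driven by the standard store-compaction settings. The keys below are the usual HBase configuration names and the values shown are the stock defaults, not values read from this test.

// Sketch of the stock compaction knobs behind the selection log lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // fewest files worth compacting
    conf.setInt("hbase.hstore.compaction.max", 10);       // most files per minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio used by ExploringCompactionPolicy
    return conf;
  }
}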
2024-11-28T07:21:43,022 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4c5b3144b8b84bbc9103e498b7b705c1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=51.7 K 2024-11-28T07:21:43,022 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/1693e15515a74b1bbb6af4a90f6a8ce9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.4 K 2024-11-28T07:21:43,023 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c5b3144b8b84bbc9103e498b7b705c1, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498016 2024-11-28T07:21:43,023 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1693e15515a74b1bbb6af4a90f6a8ce9, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498016 2024-11-28T07:21:43,023 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfdd31e165eb4d55a9f8371f93a0c10f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=593, earliestPutTs=1732778498651 2024-11-28T07:21:43,023 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c49b7b94dc1a46f884e151be6162a689, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=593, earliestPutTs=1732778498651 2024-11-28T07:21:43,024 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9017d49725e46b4a0caaa1c6ead23de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=608, earliestPutTs=1732778499614 2024-11-28T07:21:43,024 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 
21cf4f0a3b004ed3b9ca9d6f7de7df82, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=608, earliestPutTs=1732778499614 2024-11-28T07:21:43,024 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb7eab746864441ab65fab6d6d4068c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=633, earliestPutTs=1732778499804 2024-11-28T07:21:43,024 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e1b4e70b135b426bbb0141094b6ee50e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=633, earliestPutTs=1732778499804 2024-11-28T07:21:43,038 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#B#compaction#130 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:43,038 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#A#compaction#131 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:43,039 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d6767c883af046a38af3c4caf0632a6e is 50, key is test_row_0/B:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:43,039 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/fdad00b17b1c417c990dd59fae736c6b is 50, key is test_row_0/A:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:43,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741969_1145 (size=13765) 2024-11-28T07:21:43,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741970_1146 (size=13799) 2024-11-28T07:21:43,055 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/d6767c883af046a38af3c4caf0632a6e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d6767c883af046a38af3c4caf0632a6e 2024-11-28T07:21:43,060 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/B of af0c88dc7f2cd28f9a7271a3bc766683 into d6767c883af046a38af3c4caf0632a6e(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:43,060 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:43,060 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/B, priority=12, startTime=1732778503021; duration=0sec 2024-11-28T07:21:43,061 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:43,061 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:B 2024-11-28T07:21:43,061 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:43,062 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50532 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:43,062 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): af0c88dc7f2cd28f9a7271a3bc766683/C is initiating minor compaction (all files) 2024-11-28T07:21:43,062 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of af0c88dc7f2cd28f9a7271a3bc766683/C in TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:43,063 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bba21b5924c444f59e43cf42c49562b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp, totalSize=49.3 K 2024-11-28T07:21:43,063 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bba21b5924c444f59e43cf42c49562b1, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=570, earliestPutTs=1732778498016 2024-11-28T07:21:43,063 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ad45ad41ca064ba489a3971ac4c7123e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=593, earliestPutTs=1732778498651 2024-11-28T07:21:43,064 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting aa4aff3afbff43db80fc15e12759c0b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=608, earliestPutTs=1732778499614
2024-11-28T07:21:43,064 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 78373a46dcf046fabfe163fa252340e7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=633, earliestPutTs=1732778499804
2024-11-28T07:21:43,068 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:56318
2024-11-28T07:21:43,068 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 132
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 110
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 112
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 132
2024-11-28T07:21:43,068 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5591
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5468
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2493
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7478 rows
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2484
2024-11-28T07:21:43,069 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7452 rows
2024-11-28T07:21:43,069 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-28T07:21:43,069 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:56318
2024-11-28T07:21:43,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T07:21:43,076 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-28T07:21:43,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-28T07:21:43,082 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): af0c88dc7f2cd28f9a7271a3bc766683#C#compaction#132 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:43,083 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0ccf1192bada4a5b87fcab52f5bee849 is 50, key is test_row_0/C:col10/1732778501761/Put/seqid=0 2024-11-28T07:21:43,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:43,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741971_1147 (size=13765) 2024-11-28T07:21:43,091 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778503090"}]},"ts":"1732778503090"} 2024-11-28T07:21:43,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:43,092 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T07:21:43,095 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T07:21:43,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:21:43,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, UNASSIGN}] 2024-11-28T07:21:43,101 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, UNASSIGN 2024-11-28T07:21:43,102 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=af0c88dc7f2cd28f9a7271a3bc766683, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:43,103 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:21:43,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:43,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:43,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:43,260 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:43,261 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] 
handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:21:43,261 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing af0c88dc7f2cd28f9a7271a3bc766683, disabling compactions & flushes 2024-11-28T07:21:43,261 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:43,451 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/fdad00b17b1c417c990dd59fae736c6b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/fdad00b17b1c417c990dd59fae736c6b 2024-11-28T07:21:43,455 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/A of af0c88dc7f2cd28f9a7271a3bc766683 into fdad00b17b1c417c990dd59fae736c6b(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:43,455 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:43,455 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/A, priority=12, startTime=1732778503021; duration=0sec 2024-11-28T07:21:43,456 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:43,456 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:A 2024-11-28T07:21:43,495 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/0ccf1192bada4a5b87fcab52f5bee849 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0ccf1192bada4a5b87fcab52f5bee849 2024-11-28T07:21:43,500 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in af0c88dc7f2cd28f9a7271a3bc766683/C of af0c88dc7f2cd28f9a7271a3bc766683 into 0ccf1192bada4a5b87fcab52f5bee849(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
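Aside (illustrative sketch, not code from this run): from "Started disable of TestAcidGuarantees" onward, the master runs DisableTableProcedure pid=36, which schedules CloseTableRegionsProcedure / TransitRegionStateProcedure / CloseRegionProcedure children (pids 37-39), and the region server begins closing af0c88dc7f2cd28f9a7271a3bc766683. A minimal client-side equivalent with default placeholder connection settings:

// Illustrative sketch only: the Admin call behind "Started disable of TestAcidGuarantees".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // placeholder: picks up local hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Blocks until all regions are closed and hbase:meta marks the table DISABLED.
        admin.disableTable(table);
      }
    }
  }
}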
2024-11-28T07:21:43,500 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:43,500 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683., storeName=af0c88dc7f2cd28f9a7271a3bc766683/C, priority=12, startTime=1732778503021; duration=0sec 2024-11-28T07:21:43,500 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:43,500 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:43,500 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: af0c88dc7f2cd28f9a7271a3bc766683:C 2024-11-28T07:21:43,500 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 2024-11-28T07:21:43,500 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. after waiting 0 ms 2024-11-28T07:21:43,500 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:43,500 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing af0c88dc7f2cd28f9a7271a3bc766683 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=A 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=B 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK af0c88dc7f2cd28f9a7271a3bc766683, store=C 2024-11-28T07:21:43,501 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:43,505 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/6e94992a47ba4252afd17b23c1fab254 is 50, key is test_row_1/A:col10/1732778503067/Put/seqid=0 2024-11-28T07:21:43,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741972_1148 (size=9857) 2024-11-28T07:21:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:43,910 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=643 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/6e94992a47ba4252afd17b23c1fab254 2024-11-28T07:21:43,919 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/04f32e4df145475fb9bcb906307609c1 is 50, key is test_row_1/B:col10/1732778503067/Put/seqid=0 2024-11-28T07:21:43,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741973_1149 (size=9857) 2024-11-28T07:21:44,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:44,324 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 
{event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=643 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/04f32e4df145475fb9bcb906307609c1 2024-11-28T07:21:44,331 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/be69ebb0f5c0428c8ca8a18a839317b2 is 50, key is test_row_1/C:col10/1732778503067/Put/seqid=0 2024-11-28T07:21:44,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741974_1150 (size=9857) 2024-11-28T07:21:44,736 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=643 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/be69ebb0f5c0428c8ca8a18a839317b2 2024-11-28T07:21:44,741 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/A/6e94992a47ba4252afd17b23c1fab254 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6e94992a47ba4252afd17b23c1fab254 2024-11-28T07:21:44,745 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6e94992a47ba4252afd17b23c1fab254, entries=100, sequenceid=643, filesize=9.6 K 2024-11-28T07:21:44,746 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/B/04f32e4df145475fb9bcb906307609c1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/04f32e4df145475fb9bcb906307609c1 2024-11-28T07:21:44,750 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/04f32e4df145475fb9bcb906307609c1, entries=100, sequenceid=643, filesize=9.6 K 2024-11-28T07:21:44,751 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/.tmp/C/be69ebb0f5c0428c8ca8a18a839317b2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/be69ebb0f5c0428c8ca8a18a839317b2 2024-11-28T07:21:44,755 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/be69ebb0f5c0428c8ca8a18a839317b2, entries=100, sequenceid=643, filesize=9.6 K 2024-11-28T07:21:44,756 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for af0c88dc7f2cd28f9a7271a3bc766683 in 1256ms, sequenceid=643, compaction requested=false 2024-11-28T07:21:44,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6054179a711e499fbf2aeef79121ad1f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d81ba174d33a47178ac3ade3880e3b68, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1d27d455ea35423496768c4cab987ba0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/872a584ac1a84e65bc108afb12056b42, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f3669ad9cfcc43d89186c0abd6f35f5a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/16645ff93de5492cbd613fc99fbcf176, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/56d74c5b7cfa48de886b92d3001fe3a0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b042921da0664a938f49dd45b9d979c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/dadf023a87a04385b565b96b8bae294f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/91a39f4c83564f35abca69ef7231eae0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4c5b3144b8b84bbc9103e498b7b705c1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0] to archive 2024-11-28T07:21:44,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
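Aside (sketch): the StoreCloser/HFileArchiver entries that follow do not delete obsolete store files; they move them from data/default/TestAcidGuarantees/<region>/A to the parallel archive/data/default/TestAcidGuarantees/<region>/A directory. A small sketch for inspecting that archive directory on this run's HDFS; the namenode port (44329) and base path are taken from the log above and would differ on any other cluster.

// Sketch: list the archived store files for column family A of this region.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedHFiles {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new URI("hdfs://localhost:44329"), new Configuration());
    Path archivedFamilyA = new Path("/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/"
        + "archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A");
    for (FileStatus f : fs.listStatus(archivedFamilyA)) {
      System.out.println(f.getPath().getName() + "\t" + f.getLen() + " bytes");
    }
  }
}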
2024-11-28T07:21:44,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1c2ca8a251d04aebb9ad00c4c30d82d2 2024-11-28T07:21:44,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/7177d32ac69649c79d5867bb47d4b4cd 2024-11-28T07:21:44,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/5c04daf35e42411bbcaf6c6691eb7e75 2024-11-28T07:21:44,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1a9fa977ccaa473bb64a8fe9ed181c8d 2024-11-28T07:21:44,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6054179a711e499fbf2aeef79121ad1f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6054179a711e499fbf2aeef79121ad1f 2024-11-28T07:21:44,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ecb0b94a509a46b2ba7b82545a1adfc5 2024-11-28T07:21:44,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/de266d2c5bdc4624a875bb7f1418dbd5 2024-11-28T07:21:44,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d7e32d8b86944f7184d437c1e4ffbc5c 2024-11-28T07:21:44,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d81ba174d33a47178ac3ade3880e3b68 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d81ba174d33a47178ac3ade3880e3b68 2024-11-28T07:21:44,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/893fc21154bc468cb812692927e40832 2024-11-28T07:21:44,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/636be1ff13954dff9b4f30e8e0eb6f57 2024-11-28T07:21:44,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/47b995c82fe6414daee7244eac4bd58b 2024-11-28T07:21:44,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1d27d455ea35423496768c4cab987ba0 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/1d27d455ea35423496768c4cab987ba0 2024-11-28T07:21:44,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b5507d8b00154c91bdb1cbb80310dee7 2024-11-28T07:21:44,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/872a584ac1a84e65bc108afb12056b42 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/872a584ac1a84e65bc108afb12056b42 2024-11-28T07:21:44,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3b197017e1c34778b1294cb707d8af61 2024-11-28T07:21:44,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/66aa061ef35b490f92f0ab1358d58ea9 2024-11-28T07:21:44,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3a04a66bd8c24bffa387a3038fa0bac0 2024-11-28T07:21:44,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f3669ad9cfcc43d89186c0abd6f35f5a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f3669ad9cfcc43d89186c0abd6f35f5a 2024-11-28T07:21:44,789 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4755983048ff4de48c961ce9d3873f27 2024-11-28T07:21:44,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ae0f7f3df2f3480a890a7cc6fabc7aa8 2024-11-28T07:21:44,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/16645ff93de5492cbd613fc99fbcf176 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/16645ff93de5492cbd613fc99fbcf176 2024-11-28T07:21:44,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/0978f99079a648a6991ca008712bb8f9 2024-11-28T07:21:44,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/f5c0294fec654fb89cecce8429e5f518 2024-11-28T07:21:44,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e49a022a00784a6f9965113d347c3d10 2024-11-28T07:21:44,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/56d74c5b7cfa48de886b92d3001fe3a0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/56d74c5b7cfa48de886b92d3001fe3a0 2024-11-28T07:21:44,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/ea498121ce2f4843bfb3163b8503c087 2024-11-28T07:21:44,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/3e1aa9e28dc747779c824eccd0f02a22 2024-11-28T07:21:44,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b042921da0664a938f49dd45b9d979c8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/b042921da0664a938f49dd45b9d979c8 2024-11-28T07:21:44,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/c808903417074cf1ac6e2d4b64f1b32e 2024-11-28T07:21:44,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e545d44915274624a24721a5d23eee20 2024-11-28T07:21:44,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e4d52d8331be48bdaf4db66e4c5b5b22 2024-11-28T07:21:44,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/dadf023a87a04385b565b96b8bae294f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/dadf023a87a04385b565b96b8bae294f 2024-11-28T07:21:44,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/511113fc153144e3b21c93dc5ad303d1 2024-11-28T07:21:44,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/d8c6c52dbd1f4fd99baca8f36f825ae9 2024-11-28T07:21:44,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/91a39f4c83564f35abca69ef7231eae0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/91a39f4c83564f35abca69ef7231eae0 2024-11-28T07:21:44,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/32c9faf9116b4c3bad13a42d0a644c4a 2024-11-28T07:21:44,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4350ef7371df46399c61f6ca23d49fc1 2024-11-28T07:21:44,811 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/afc824c3d6b9461ba9245cf137d5b2d1 2024-11-28T07:21:44,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4c5b3144b8b84bbc9103e498b7b705c1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/4c5b3144b8b84bbc9103e498b7b705c1 2024-11-28T07:21:44,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/cfdd31e165eb4d55a9f8371f93a0c10f 2024-11-28T07:21:44,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/e9017d49725e46b4a0caaa1c6ead23de 2024-11-28T07:21:44,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/eb7eab746864441ab65fab6d6d4068c0 2024-11-28T07:21:44,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/79c1ff1f2e004bfcb00faa097ac869c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ff3bc399d2b744ed87632549e61474ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93ee609f9ad44f8093fbb8d65ceb3943, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5da185e8eb194c219007fa4c09c002ec, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/25df1fa4d0f14238a1be248c4a917cef, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6f23f5e747d24581b8419931dfbc6ef8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d4a191a1af6b403ab3f2693ba5e4b666, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/a9373a1f63d845269b4438132fb2fa6c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d84b23776cab4aa39839b7ced87ef3d8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/436cef79889246f4aa2e37e2b6043372, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93553e73b6354c1ebcf4180bc1f6c892, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/1693e15515a74b1bbb6af4a90f6a8ce9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e] to archive 2024-11-28T07:21:44,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:21:44,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6957ecb2f7764a6890938abd3acc9614 2024-11-28T07:21:44,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/39ca18fc99324d5b97047eaa5ea500ae 2024-11-28T07:21:44,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/85103c5dfe0647ac94239a4b144fc831 2024-11-28T07:21:44,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/79c1ff1f2e004bfcb00faa097ac869c0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/79c1ff1f2e004bfcb00faa097ac869c0 2024-11-28T07:21:44,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8e907a8ee1b64cad9f152be72cdac58e 2024-11-28T07:21:44,847 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/30c16a32eda648e3b512756cba548543 2024-11-28T07:21:44,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ff3bc399d2b744ed87632549e61474ac to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ff3bc399d2b744ed87632549e61474ac 2024-11-28T07:21:44,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/743691d392254245a5acd997b7b8fd52 2024-11-28T07:21:44,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ce50fe3a443843038474cf3ef149d8d4 2024-11-28T07:21:44,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93ee609f9ad44f8093fbb8d65ceb3943 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93ee609f9ad44f8093fbb8d65ceb3943 2024-11-28T07:21:44,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/afd7edd1c6524843986906c6d3d59ad3 2024-11-28T07:21:44,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/54e43af7ab8542d890fda3aed31ac13d 2024-11-28T07:21:44,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5da185e8eb194c219007fa4c09c002ec to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5da185e8eb194c219007fa4c09c002ec 2024-11-28T07:21:44,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/2c88c6acc92546459c58369c19c37dcd 2024-11-28T07:21:44,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/884d0f00c23c446b96e5b8f497138eb9 2024-11-28T07:21:44,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/25df1fa4d0f14238a1be248c4a917cef to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/25df1fa4d0f14238a1be248c4a917cef 2024-11-28T07:21:44,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c830dc83571c46efb09e05ba589c2705 2024-11-28T07:21:44,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e2573f6bd9c044f69e7cc247c623c7aa 2024-11-28T07:21:44,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6f23f5e747d24581b8419931dfbc6ef8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/6f23f5e747d24581b8419931dfbc6ef8 2024-11-28T07:21:44,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bad3fa27a96543f3bd016fa1ce0ee1aa 2024-11-28T07:21:44,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/5af7ba73fc464f4a85d49d0aa388ddc1 2024-11-28T07:21:44,867 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d4a191a1af6b403ab3f2693ba5e4b666 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d4a191a1af6b403ab3f2693ba5e4b666 2024-11-28T07:21:44,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/599bec92658447e48e849f55a1d571f0 2024-11-28T07:21:44,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/02328af6c2534521938f2a17d660d9f1 2024-11-28T07:21:44,870 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/563ef0ab3e1c4fa7acd2b9db10edf03e 2024-11-28T07:21:44,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/a9373a1f63d845269b4438132fb2fa6c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/a9373a1f63d845269b4438132fb2fa6c 2024-11-28T07:21:44,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8dba9b80f0a944c3b60f82dcb5f1424b 2024-11-28T07:21:44,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/62ee64e1b9474149a7fff2c6b54ab479 2024-11-28T07:21:44,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d84b23776cab4aa39839b7ced87ef3d8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d84b23776cab4aa39839b7ced87ef3d8 2024-11-28T07:21:44,876 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/be84293200694e9b8bc5032187b940e9 2024-11-28T07:21:44,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/ffb6504fb81049898e0eb88e95f6f016 2024-11-28T07:21:44,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/3e16fe36d8e4466fac6e895126f9bc7f 2024-11-28T07:21:44,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/436cef79889246f4aa2e37e2b6043372 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/436cef79889246f4aa2e37e2b6043372 2024-11-28T07:21:44,880 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/88622588f9ce470a9b43775c3d72b594 2024-11-28T07:21:44,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/8c8ad6165c3445e7adc318000576bc26 2024-11-28T07:21:44,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93553e73b6354c1ebcf4180bc1f6c892 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/93553e73b6354c1ebcf4180bc1f6c892 2024-11-28T07:21:44,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/86b81261a02843889abd287ee613de87 2024-11-28T07:21:44,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/bffc3a15456548aabd5678e718fbd843 2024-11-28T07:21:44,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/b751604b5b504d43b8136f9661db46c9 2024-11-28T07:21:44,888 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/1693e15515a74b1bbb6af4a90f6a8ce9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/1693e15515a74b1bbb6af4a90f6a8ce9 2024-11-28T07:21:44,889 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/476498d2518e4beb8e162bf023948b67 2024-11-28T07:21:44,891 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/c49b7b94dc1a46f884e151be6162a689 2024-11-28T07:21:44,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/21cf4f0a3b004ed3b9ca9d6f7de7df82 2024-11-28T07:21:44,893 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/e1b4e70b135b426bbb0141094b6ee50e 2024-11-28T07:21:44,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/308092e494df4d778923dc8b95e445f4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8251f7dd41b94344b70cfea032f2f3ed, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a1c58ecabbb540778dc16dc378e99d17, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/88373fad88374dcb86a345dcc5bd9b18, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9f4d152d7caa4114ac0dc12838a49d22, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/36a1755b02c941fcb07977e30e7d1c6b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9711c7dab8cf498baf1e71f697ca227f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9678aca61bf54f85b3709f66036d230d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1e493b151cda47d8a16e6f216409a94c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/4bb0b7b4560b4ca6b99c0099f3c32f8e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bba21b5924c444f59e43cf42c49562b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7] to archive 2024-11-28T07:21:44,896 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
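[Editor's note] The archive entries above and below all follow one pattern: each compacted store file is moved from the region's data directory to the parallel location under archive/, with the table, region, and column-family path components preserved. The sketch below is only an illustration of that path mapping as it appears in these log lines; it is not HBase's HFileArchiver, and the class name, method, and local (non-HDFS) paths are hypothetical.

import java.nio.file.Path;
import java.nio.file.Paths;

// Hypothetical helper: mirrors the data/ -> archive/data/ re-rooting visible in the
// backup.HFileArchiver(596) entries above. It only demonstrates how the destination
// keeps the table/region/family/file components of the source path.
public class ArchivePathSketch {

    // rootDir: cluster test-data root (assumed local path here instead of hdfs://)
    static Path toArchivePath(Path rootDir, Path storeFile) {
        // data/default/<table>/<region>/<family>/<hfile>, relative to the root
        Path relative = rootDir.relativize(storeFile);
        // same relative path, re-rooted under archive/
        return rootDir.resolve("archive").resolve(relative);
    }

    public static void main(String[] args) {
        Path root = Paths.get("/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e");
        Path src = root.resolve(
            "data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626");
        // Prints .../archive/data/default/TestAcidGuarantees/.../C/a6a0316196634f89a7708516218e5626,
        // matching the destination recorded for that file in the log below.
        System.out.println(toArchivePath(root, src));
    }
}

[End editor's note]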
2024-11-28T07:21:44,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a6a0316196634f89a7708516218e5626 2024-11-28T07:21:44,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d34df5b7ce1e43bd945df01bb4239692 2024-11-28T07:21:44,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ed95a9a850e54a509414dee1a0f70766 2024-11-28T07:21:44,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/081678340c0e45cba54ab58540e5097f 2024-11-28T07:21:44,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/580ed7a2b6ca4444aaf2a1d58b792b24 2024-11-28T07:21:44,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/308092e494df4d778923dc8b95e445f4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/308092e494df4d778923dc8b95e445f4 2024-11-28T07:21:44,904 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/60ed42845dfc4f32b83f83646e682330 2024-11-28T07:21:44,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d46d2ea8a7404a54ad92d5529ab9af4c 2024-11-28T07:21:44,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8251f7dd41b94344b70cfea032f2f3ed to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8251f7dd41b94344b70cfea032f2f3ed 2024-11-28T07:21:44,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f28eeb327b344b58bacf2a9c22b23d5b 2024-11-28T07:21:44,908 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d23eeb423f7940eda0bc204f5b827199 2024-11-28T07:21:44,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a1c58ecabbb540778dc16dc378e99d17 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/a1c58ecabbb540778dc16dc378e99d17 2024-11-28T07:21:44,910 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0102591f9fca41fcb4137df5b167dc64 2024-11-28T07:21:44,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa143092d9734f77a602f9b91765c1f8 2024-11-28T07:21:44,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/88373fad88374dcb86a345dcc5bd9b18 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/88373fad88374dcb86a345dcc5bd9b18 2024-11-28T07:21:44,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8decff08d24e4e8fa73b4b7dcc4a82ee 2024-11-28T07:21:44,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d461b0941cdb405fa2f1285bae2df13b 2024-11-28T07:21:44,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9f4d152d7caa4114ac0dc12838a49d22 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9f4d152d7caa4114ac0dc12838a49d22 2024-11-28T07:21:44,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/f31b3b7eb0b7442a99542b7b3d7cd39d 2024-11-28T07:21:44,917 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/501022436163424484a0579633e149c8 2024-11-28T07:21:44,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/36a1755b02c941fcb07977e30e7d1c6b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/36a1755b02c941fcb07977e30e7d1c6b 2024-11-28T07:21:44,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/3ebdebe8c6594d33894dd77fb915e39b 2024-11-28T07:21:44,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d25d6f7e40d245eba818d78aed5e8bbc 2024-11-28T07:21:44,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/406c4df216aa4a25aa90ab95f41abd43 2024-11-28T07:21:44,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9711c7dab8cf498baf1e71f697ca227f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9711c7dab8cf498baf1e71f697ca227f 2024-11-28T07:21:44,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/fbff2f05ca5743b28b58158f6b3dba64 2024-11-28T07:21:44,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/6cae9ce366184434976d84a3d74f97f1 2024-11-28T07:21:44,927 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9678aca61bf54f85b3709f66036d230d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9678aca61bf54f85b3709f66036d230d 2024-11-28T07:21:44,929 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/9084e829a0354f23b374b61758900d09 2024-11-28T07:21:44,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d7617a6ef323423e92f0960e6d7cc5c7 2024-11-28T07:21:44,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1522e4ed5d4e4cf6a99546db5afedb71 2024-11-28T07:21:44,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1e493b151cda47d8a16e6f216409a94c to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/1e493b151cda47d8a16e6f216409a94c 2024-11-28T07:21:44,935 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/35fd10f1b3774aefb915e6613c5e3639 2024-11-28T07:21:44,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/d60ed96aa64646998148c7d8d4be7b33 2024-11-28T07:21:44,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/4bb0b7b4560b4ca6b99c0099f3c32f8e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/4bb0b7b4560b4ca6b99c0099f3c32f8e 2024-11-28T07:21:44,939 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bfd4429387d3473a9937df7ee87fbaba 2024-11-28T07:21:44,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/8bd417daab6f492d907a45316bc10bfe 2024-11-28T07:21:44,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0a466356f29f41369ad9b3fec2310610 2024-11-28T07:21:44,942 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bba21b5924c444f59e43cf42c49562b1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bba21b5924c444f59e43cf42c49562b1 2024-11-28T07:21:44,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/bbc46fe232944492bff7f334c32aec9f 2024-11-28T07:21:44,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/ad45ad41ca064ba489a3971ac4c7123e 2024-11-28T07:21:44,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/aa4aff3afbff43db80fc15e12759c0b3 2024-11-28T07:21:44,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/78373a46dcf046fabfe163fa252340e7 2024-11-28T07:21:44,954 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/recovered.edits/646.seqid, newMaxSeqId=646, maxSeqId=1 2024-11-28T07:21:44,957 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683. 
2024-11-28T07:21:44,957 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for af0c88dc7f2cd28f9a7271a3bc766683: 2024-11-28T07:21:44,959 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:44,959 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=af0c88dc7f2cd28f9a7271a3bc766683, regionState=CLOSED 2024-11-28T07:21:44,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-28T07:21:44,963 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure af0c88dc7f2cd28f9a7271a3bc766683, server=592d8b721726,33143,1732778474488 in 1.8570 sec 2024-11-28T07:21:44,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-28T07:21:44,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=af0c88dc7f2cd28f9a7271a3bc766683, UNASSIGN in 1.8620 sec 2024-11-28T07:21:44,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-28T07:21:44,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8690 sec 2024-11-28T07:21:44,968 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778504967"}]},"ts":"1732778504967"} 2024-11-28T07:21:44,969 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:21:44,971 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:21:44,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8880 sec 2024-11-28T07:21:45,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T07:21:45,196 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-28T07:21:45,199 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:21:45,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,204 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,205 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-28T07:21:45,208 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:45,212 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/recovered.edits] 2024-11-28T07:21:45,215 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6e94992a47ba4252afd17b23c1fab254 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/6e94992a47ba4252afd17b23c1fab254 2024-11-28T07:21:45,216 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/fdad00b17b1c417c990dd59fae736c6b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/A/fdad00b17b1c417c990dd59fae736c6b 2024-11-28T07:21:45,219 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/04f32e4df145475fb9bcb906307609c1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/04f32e4df145475fb9bcb906307609c1 2024-11-28T07:21:45,220 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d6767c883af046a38af3c4caf0632a6e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/B/d6767c883af046a38af3c4caf0632a6e 2024-11-28T07:21:45,222 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0ccf1192bada4a5b87fcab52f5bee849 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/0ccf1192bada4a5b87fcab52f5bee849 
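The DeleteTableProcedure entries above show HFileArchiver-1 moving the remaining store files for families A, B and C into the archive tree before the region directory itself is removed. As a hedged sketch, one could confirm the result with the public Hadoop FileSystem API; the NameNode address and paths below are taken from this log and are environment-specific, and the check itself is an illustration, not part of the test:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: list what HFileArchiver left under the archive directory for the
// region seen in the log. Host/port and the test-data directory come from the
// log lines above and will differ per run.
public class ListArchivedFiles {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44329"), conf);
        Path archivedRegion = new Path(
            "/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/"
            + "archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683");
        for (FileStatus family : fs.listStatus(archivedRegion)) {      // A, B, C, recovered.edits
            for (FileStatus hfile : fs.listStatus(family.getPath())) { // individual archived files
                System.out.println(hfile.getPath() + " (" + hfile.getLen() + " bytes)");
            }
        }
    }
}
```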
2024-11-28T07:21:45,224 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/be69ebb0f5c0428c8ca8a18a839317b2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/C/be69ebb0f5c0428c8ca8a18a839317b2 2024-11-28T07:21:45,226 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/recovered.edits/646.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683/recovered.edits/646.seqid 2024-11-28T07:21:45,227 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/af0c88dc7f2cd28f9a7271a3bc766683 2024-11-28T07:21:45,227 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:21:45,232 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-28T07:21:45,240 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:21:45,278 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:21:45,279 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,279 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T07:21:45,280 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778505279"}]},"ts":"9223372036854775807"} 2024-11-28T07:21:45,283 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:21:45,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => af0c88dc7f2cd28f9a7271a3bc766683, NAME => 'TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:21:45,283 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
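The surrounding entries record the client side of this teardown: HBaseAdmin reports "Operation: DISABLE ... procId: 36 completed" and, shortly after, "Operation: DELETE ... procId: 40 completed". A minimal sketch of that call sequence using the public HBase Admin API (connection setup here is assumed; the test harness manages its own connection and configuration):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the disable-then-delete sequence the log attributes to the
// "Time-limited test" client. Configuration/connection details are assumptions.
public class DropTestTable {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(table)) {
                admin.disableTable(table);  // server side runs DisableTableProcedure (pid=36 above)
                admin.deleteTable(table);   // server side runs DeleteTableProcedure (pid=40 above)
            }
        }
    }
}
```

Both Admin calls block until the corresponding master procedure reports completion, which is the "procId ... completed" message visible in the log.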
2024-11-28T07:21:45,284 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778505283"}]},"ts":"9223372036854775807"} 2024-11-28T07:21:45,286 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:21:45,289 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,290 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 89 msec 2024-11-28T07:21:45,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-28T07:21:45,307 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-11-28T07:21:45,320 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=241 (was 219) Potentially hanging thread: RS:0;592d8b721726:33143-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-5 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/592d8b721726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_196941728_22 at /127.0.0.1:36074 [Waiting for operation #390] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-279090859_22 at /127.0.0.1:35932 [Waiting for operation #404] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_196941728_22 at /127.0.0.1:48066 [Waiting for operation #51] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x10bb86e4-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/592d8b721726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-279090859_22 at /127.0.0.1:40122 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=460 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=339 (was 251) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4996 (was 5569) 2024-11-28T07:21:45,328 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=241, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=339, ProcessCount=11, AvailableMemoryMB=4996 2024-11-28T07:21:45,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T07:21:45,331 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:21:45,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:45,332 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:21:45,333 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:45,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for 
creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-11-28T07:21:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:45,334 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:21:45,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741975_1151 (size=960) 2024-11-28T07:21:45,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:45,742 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:21:45,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741976_1152 (size=53) 2024-11-28T07:21:45,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8dbe93101666996632a420c7c97b42e1, disabling compactions & flushes 2024-11-28T07:21:46,149 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
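The create request logged above (three column families A/B/C with VERSIONS => '1', BLOCKSIZE => '65536', and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC') corresponds roughly to the following HBase 2.x Admin calls. This is a minimal sketch reconstructed from the logged descriptor, not the test's actual code; the 131072-byte flush size that triggers the TableDescriptorChecker warning could come from either the descriptor or the site configuration, and is set on the descriptor here purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table attribute seen in the logged descriptor: BASIC in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                  // Small flush size (131072) is what provokes the TableDescriptorChecker warning.
                  .setMemStoreFlushSize(131072L);
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .setBlocksize(65536)   // BLOCKSIZE => '65536'
                    .build());
          }
          admin.createTable(table.build()); // surfaces as CreateTableProcedure pid=41 in the log
        }
      }
    }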
2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. after waiting 0 ms 2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,149 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,149 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:46,150 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:21:46,150 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778506150"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778506150"}]},"ts":"1732778506150"} 2024-11-28T07:21:46,152 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:21:46,152 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:21:46,153 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778506152"}]},"ts":"1732778506152"} 2024-11-28T07:21:46,153 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:21:46,157 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, ASSIGN}] 2024-11-28T07:21:46,158 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, ASSIGN 2024-11-28T07:21:46,159 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:21:46,309 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:46,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:46,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:46,463 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:46,466 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,466 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:46,467 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,467 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:46,467 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,467 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,469 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,470 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:46,470 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName A 2024-11-28T07:21:46,470 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:46,471 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:46,471 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,472 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:46,473 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName B 2024-11-28T07:21:46,473 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:46,474 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:46,474 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,475 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:46,475 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName C 2024-11-28T07:21:46,475 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:46,475 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:46,476 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,476 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,477 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,478 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-28T07:21:46,479 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:46,481 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:21:46,482 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened 8dbe93101666996632a420c7c97b42e1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64870154, jitterRate=-0.033359378576278687}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:21:46,483 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:46,484 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., pid=43, masterSystemTime=1732778506462 2024-11-28T07:21:46,486 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:46,486 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
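The stores opened above are CompactingMemStore instances with compactor=BASIC, driven by the table-level 'hbase.hregion.compacting.memstore.type' attribute in the descriptor. For reference, the same behaviour can also be requested per column family; the snippet below is only an illustrative sketch and not part of the test.

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BasicInMemoryCompactionSketch {
      // Builds a family descriptor that asks for BASIC in-memory compaction,
      // the per-family equivalent of the table attribute seen in the log.
      static ColumnFamilyDescriptor familyWithBasicCompaction(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
      }
    }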
2024-11-28T07:21:46,486 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:46,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-28T07:21:46,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 in 177 msec 2024-11-28T07:21:46,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-28T07:21:46,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, ASSIGN in 333 msec 2024-11-28T07:21:46,493 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:21:46,493 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778506493"}]},"ts":"1732778506493"} 2024-11-28T07:21:46,494 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:21:46,497 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:21:46,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1660 sec 2024-11-28T07:21:47,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-28T07:21:47,439 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-28T07:21:47,441 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x118b007e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d29de25 2024-11-28T07:21:47,446 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a378df6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:47,448 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:47,450 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:47,452 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:21:47,454 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39054, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:21:47,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T07:21:47,459 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:21:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T07:21:47,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741977_1153 (size=996) 2024-11-28T07:21:47,878 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T07:21:47,878 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T07:21:47,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:21:47,890 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, REOPEN/MOVE}] 2024-11-28T07:21:47,891 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, REOPEN/MOVE 2024-11-28T07:21:47,891 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:47,892 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:21:47,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:48,044 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:48,045 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,045 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:21:48,045 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing 8dbe93101666996632a420c7c97b42e1, disabling compactions & flushes 2024-11-28T07:21:48,045 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,045 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,045 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. after waiting 0 ms 2024-11-28T07:21:48,045 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
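The ModifyTableProcedure above (pid=44) switches column family A to a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') and then reopens the region via ReopenTableRegionsProcedure. A minimal sketch of the equivalent Admin call follows; the Admin handle is assumed, and only family A is changed, mirroring the logged before/after descriptors.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
      // Turns family 'A' of TestAcidGuarantees into a MOB family, which the master
      // applies via ModifyTableProcedure and a region REOPEN/MOVE as logged below.
      static void enableMobOnFamilyA(Admin admin) throws IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)  // IS_MOB => 'true'
                    .setMobThreshold(4L)  // cells larger than 4 bytes go to MOB files
                    .build())
            .build();
        admin.modifyTable(modified);
      }
    }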
2024-11-28T07:21:48,049 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T07:21:48,050 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,050 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:48,050 WARN [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: 8dbe93101666996632a420c7c97b42e1 to self. 2024-11-28T07:21:48,052 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,052 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=CLOSED 2024-11-28T07:21:48,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-28T07:21:48,054 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 in 160 msec 2024-11-28T07:21:48,055 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, REOPEN/MOVE; state=CLOSED, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=true 2024-11-28T07:21:48,205 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:21:48,359 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:48,362 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:48,362 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:21:48,362 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,363 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:21:48,363 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,363 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,365 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,366 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:48,371 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName A 2024-11-28T07:21:48,372 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:48,373 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:48,374 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,374 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:48,374 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName B 2024-11-28T07:21:48,374 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:48,375 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:48,375 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,376 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:21:48,376 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8dbe93101666996632a420c7c97b42e1 columnFamilyName C 2024-11-28T07:21:48,376 DEBUG [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:48,377 INFO [StoreOpener-8dbe93101666996632a420c7c97b42e1-1 {}] regionserver.HStore(327): Store=8dbe93101666996632a420c7c97b42e1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:21:48,377 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,377 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,379 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,380 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:21:48,382 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,382 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened 8dbe93101666996632a420c7c97b42e1; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72544200, jitterRate=0.08099281787872314}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:21:48,383 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:48,384 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., pid=48, masterSystemTime=1732778508359 2024-11-28T07:21:48,386 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,386 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:48,386 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=OPEN, openSeqNum=5, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-11-28T07:21:48,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 in 181 msec 2024-11-28T07:21:48,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-28T07:21:48,393 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, REOPEN/MOVE in 502 msec 2024-11-28T07:21:48,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-28T07:21:48,396 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-28T07:21:48,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-11-28T07:21:48,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-11-28T07:21:48,406 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79982672 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2931c73e 2024-11-28T07:21:48,411 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bad2e85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,412 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b4bd1ba to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@491ea2ee 2024-11-28T07:21:48,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328f994d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,417 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x454f1431 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@190853fc 2024-11-28T07:21:48,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a9306be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,421 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x505d5ccd to 
127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46114993 2024-11-28T07:21:48,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@465dc764, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,425 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-11-28T07:21:48,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,428 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-11-28T07:21:48,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,432 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3b727d6e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14c16cd4 2024-11-28T07:21:48,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a52344f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,436 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c7940d9 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@341384e 2024-11-28T07:21:48,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ba8425, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,439 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c38ee58 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@26b120d9 2024-11-28T07:21:48,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7af61386, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:21:48,446 DEBUG 
[hconnection-0x491f59a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,452 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:48,452 DEBUG [hconnection-0x4959f58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,452 DEBUG [hconnection-0x4360ec3c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-28T07:21:48,453 DEBUG [hconnection-0x1e70aef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,453 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:48,453 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52946, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,454 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:48,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:48,454 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52974, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,454 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,460 DEBUG [hconnection-0x354d3b55-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:48,460 DEBUG [hconnection-0x52ee7987-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,461 DEBUG [hconnection-0x5f17641-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,461 DEBUG [hconnection-0x7beb2b84-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,461 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
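The 'flush TestAcidGuarantees' request above, stored as FlushTableProcedure pid=49 and fanned out as FlushRegionProcedure subprocedures, amounts to a single client-side Admin call. A sketch, assuming 'admin' is an existing org.apache.hadoop.hbase.client.Admin obtained from a Connection:

    // Triggers a table-wide flush of TestAcidGuarantees.
    admin.flush(org.apache.hadoop.hbase.TableName.valueOf("TestAcidGuarantees"));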
2024-11-28T07:21:48,461 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,462 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,462 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,463 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,463 DEBUG [hconnection-0x2cb28f91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:21:48,465 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:21:48,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:48,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:48,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778568526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778568527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e66d6630b51b4c7497db713af1b7d27c_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778508476/Put/seqid=0 2024-11-28T07:21:48,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778568539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778568545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778568546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:48,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741978_1154 (size=12154) 2024-11-28T07:21:48,580 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:48,586 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e66d6630b51b4c7497db713af1b7d27c_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e66d6630b51b4c7497db713af1b7d27c_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:48,588 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0bbface044214dedaf7fa43dc80d5720, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:48,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0bbface044214dedaf7fa43dc80d5720 is 175, key is test_row_0/A:col10/1732778508476/Put/seqid=0 2024-11-28T07:21:48,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:48,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:48,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:48,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:48,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:48,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741979_1155 (size=30955) 2024-11-28T07:21:48,626 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0bbface044214dedaf7fa43dc80d5720 2024-11-28T07:21:48,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778568641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778568645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778568647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778568648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778568650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/57f06e404bdc432db64bcdf96c7b25d6 is 50, key is test_row_0/B:col10/1732778508476/Put/seqid=0 2024-11-28T07:21:48,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741980_1156 (size=12001) 2024-11-28T07:21:48,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:48,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:48,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:48,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778568849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778568850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778568850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778568861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778568861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:48,915 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:48,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:48,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:48,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:48,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:49,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,069 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,069 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/57f06e404bdc432db64bcdf96c7b25d6 2024-11-28T07:21:49,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/d0205a93515449c2820de19e5da912da is 50, key is test_row_0/C:col10/1732778508476/Put/seqid=0 2024-11-28T07:21:49,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778569152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778569153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778569154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741981_1157 (size=12001) 2024-11-28T07:21:49,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778569168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778569168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:49,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,376 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
as already flushing 2024-11-28T07:21:49,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/d0205a93515449c2820de19e5da912da 2024-11-28T07:21:49,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:49,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0bbface044214dedaf7fa43dc80d5720 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720 2024-11-28T07:21:49,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720, entries=150, sequenceid=17, filesize=30.2 K 2024-11-28T07:21:49,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/57f06e404bdc432db64bcdf96c7b25d6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6 2024-11-28T07:21:49,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:21:49,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/d0205a93515449c2820de19e5da912da as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da 2024-11-28T07:21:49,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:21:49,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 8dbe93101666996632a420c7c97b42e1 in 1123ms, sequenceid=17, compaction requested=false 2024-11-28T07:21:49,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:49,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:49,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:49,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:49,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778569672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778569673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778569673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778569675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778569679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e5af19f1cf144bdbc2c390e538bf83a_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778509660/Put/seqid=0 2024-11-28T07:21:49,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741982_1158 (size=12154) 2024-11-28T07:21:49,735 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:49,741 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e5af19f1cf144bdbc2c390e538bf83a_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e5af19f1cf144bdbc2c390e538bf83a_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:49,742 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/d90ee3de98e547468ae7860ea0103ac0, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:49,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/d90ee3de98e547468ae7860ea0103ac0 is 175, key is test_row_0/A:col10/1732778509660/Put/seqid=0 2024-11-28T07:21:49,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778569781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778569781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778569781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741983_1159 (size=30955) 2024-11-28T07:21:49,797 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/d90ee3de98e547468ae7860ea0103ac0 2024-11-28T07:21:49,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/e344ccd0af804bed811ba9e18abbebbc is 50, key is test_row_0/B:col10/1732778509660/Put/seqid=0 2024-11-28T07:21:49,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741984_1160 (size=12001) 2024-11-28T07:21:49,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778569987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778569987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,994 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:49,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:49,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:49,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:49,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:49,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
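[editorial note] The RegionTooBusyException records above come from HRegion.checkResources() rejecting mutations while the test's 512 K memstore limit is exceeded and a flush of region 8dbe93101666996632a420c7c97b42e1 is still in progress. As a rough, hedged illustration only, the sketch below shows how a client-side writer could back off and resubmit such a rejected Put. The table, row, and column names are copied from the log; the explicit retry loop is an assumption for illustration, since the stock HBase client already retries RegionTooBusyException internally and would normally only surface it once its own retries are exhausted.

    // Hypothetical client-side sketch, not part of the test output.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);   // may be rejected while the region's memstore is over its blocking limit
              break;            // write accepted
            } catch (RegionTooBusyException e) {
              // The region has blocked updates until the in-flight flush drains the memstore;
              // back off and retry instead of failing the workload outright.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }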
2024-11-28T07:21:49,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:49,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778569987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:49,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,147 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:50,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:50,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:50,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:50,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:50,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
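[editorial note] Throughout the span above, the master keeps re-dispatching remote flush procedure pid=50 to 592d8b721726,33143; the regionserver answers "NOT flushing ... as already flushing", so each attempt fails with "Unable to complete flush" and is retried until the in-flight flush completes. If a test or operator wanted to request such a table flush explicitly, it would go through the Admin API roughly as sketched below; the table name matches the log, but treating this exact call as the origin of pid=50 is an assumption.

    // Hypothetical sketch of an explicit flush request, not part of the test output.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table. As the log above shows,
          // when a region is already flushing the remote callable is rejected and the
          // procedure is re-dispatched until the flush can actually run.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }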
2024-11-28T07:21:50,162 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T07:21:50,164 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T07:21:50,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/e344ccd0af804bed811ba9e18abbebbc 2024-11-28T07:21:50,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/aa1dd51c5a7642ea863b571913ed6b69 is 50, key is test_row_0/C:col10/1732778509660/Put/seqid=0 2024-11-28T07:21:50,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741985_1161 (size=12001) 2024-11-28T07:21:50,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778570297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778570297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778570298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,302 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:50,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:50,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:50,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:50,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:50,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:50,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/aa1dd51c5a7642ea863b571913ed6b69 2024-11-28T07:21:50,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/d90ee3de98e547468ae7860ea0103ac0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0 2024-11-28T07:21:50,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0, entries=150, sequenceid=43, filesize=30.2 K 2024-11-28T07:21:50,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/e344ccd0af804bed811ba9e18abbebbc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc 2024-11-28T07:21:50,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc, entries=150, sequenceid=43, filesize=11.7 K 2024-11-28T07:21:50,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/aa1dd51c5a7642ea863b571913ed6b69 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69 2024-11-28T07:21:50,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69, entries=150, sequenceid=43, filesize=11.7 K 2024-11-28T07:21:50,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8dbe93101666996632a420c7c97b42e1 in 694ms, sequenceid=43, compaction requested=false 2024-11-28T07:21:50,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:50,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:50,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-28T07:21:50,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:50,456 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T07:21:50,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:50,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:50,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:50,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:50,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:50,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:50,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f3471022df8e4b9393272ea951030202_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778509671/Put/seqid=0 2024-11-28T07:21:50,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741986_1162 (size=12154) 2024-11-28T07:21:50,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:50,513 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f3471022df8e4b9393272ea951030202_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f3471022df8e4b9393272ea951030202_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0aef7bac3f71485594d3446a40a18ff8, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:50,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0aef7bac3f71485594d3446a40a18ff8 is 175, key is test_row_0/A:col10/1732778509671/Put/seqid=0 2024-11-28T07:21:50,524 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:21:50,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741987_1163 (size=30955) 2024-11-28T07:21:50,532 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0aef7bac3f71485594d3446a40a18ff8 2024-11-28T07:21:50,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/72d80f11c8e14fdcb045144bcf111510 is 50, key is test_row_0/B:col10/1732778509671/Put/seqid=0 2024-11-28T07:21:50,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:50,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741988_1164 (size=12001) 2024-11-28T07:21:50,579 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/72d80f11c8e14fdcb045144bcf111510 2024-11-28T07:21:50,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/680537bd8a8441e0a0cea606952ec3b5 is 50, key is test_row_0/C:col10/1732778509671/Put/seqid=0 2024-11-28T07:21:50,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741989_1165 (size=12001) 2024-11-28T07:21:50,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:50,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:50,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778570740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778570740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778570802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778570804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778570805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778570846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:50,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778570847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,009 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/680537bd8a8441e0a0cea606952ec3b5 2024-11-28T07:21:51,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0aef7bac3f71485594d3446a40a18ff8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8 2024-11-28T07:21:51,022 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8, entries=150, sequenceid=53, filesize=30.2 K 2024-11-28T07:21:51,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/72d80f11c8e14fdcb045144bcf111510 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510 2024-11-28T07:21:51,030 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T07:21:51,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/680537bd8a8441e0a0cea606952ec3b5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5 2024-11-28T07:21:51,042 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T07:21:51,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-28T07:21:51,048 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 8dbe93101666996632a420c7c97b42e1 in 592ms, sequenceid=53, compaction requested=true 2024-11-28T07:21:51,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:51,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
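The repeated RegionTooBusyException entries above come from HRegion.checkResources: the region's memstore has crossed its blocking threshold (reported as "Over memstore limit=512.0 K" in this test configuration, i.e. hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier), so Mutate calls are rejected until the in-flight flush drains the memstore. Such rejections are retriable on the client side. The sketch below is not part of this test run; the class name, table usage and timeout values are illustrative, showing only how a writer would typically tune the standard client retry settings so a brief flush-induced stall does not surface as an error:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TolerantWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // RegionTooBusyException is a retriable exception; spread the retries
            // out so a memstore flush (hundreds of ms in the log above) can finish.
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 200);              // ms between retries
            conf.setLong("hbase.client.operation.timeout", 60_000);

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put);   // retried internally if the region is briefly too busy
            }
        }
    }
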
2024-11-28T07:21:51,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-28T07:21:51,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-28T07:21:51,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:51,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:51,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-28T07:21:51,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5960 sec 2024-11-28T07:21:51,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.6060 sec 2024-11-28T07:21:51,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778571070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778571070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a5870a44ba084451b269c5779c034eb4_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:51,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741990_1166 (size=14594) 2024-11-28T07:21:51,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778571175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778571175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778571383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778571384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,506 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:51,512 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a5870a44ba084451b269c5779c034eb4_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a5870a44ba084451b269c5779c034eb4_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:51,513 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e4bd8e2563e34adb811e70bda22a9471, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:51,514 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e4bd8e2563e34adb811e70bda22a9471 is 175, key is test_row_0/A:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741991_1167 (size=39549) 2024-11-28T07:21:51,562 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e4bd8e2563e34adb811e70bda22a9471 2024-11-28T07:21:51,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/73ffbd75c91f48fabbab1fe21942b96b is 50, key is test_row_0/B:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:51,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741992_1168 
(size=12001) 2024-11-28T07:21:51,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/73ffbd75c91f48fabbab1fe21942b96b 2024-11-28T07:21:51,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/86b24263dd1548ae8306c0b29dee5d07 is 50, key is test_row_0/C:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:51,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741993_1169 (size=12001) 2024-11-28T07:21:51,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778571685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778571688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778571811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778571817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:51,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778571818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/86b24263dd1548ae8306c0b29dee5d07 2024-11-28T07:21:52,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e4bd8e2563e34adb811e70bda22a9471 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471 2024-11-28T07:21:52,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471, entries=200, sequenceid=80, filesize=38.6 K 2024-11-28T07:21:52,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/73ffbd75c91f48fabbab1fe21942b96b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b 2024-11-28T07:21:52,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b, entries=150, sequenceid=80, filesize=11.7 K 2024-11-28T07:21:52,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/86b24263dd1548ae8306c0b29dee5d07 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07 2024-11-28T07:21:52,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07, entries=150, sequenceid=80, filesize=11.7 K 2024-11-28T07:21:52,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8dbe93101666996632a420c7c97b42e1 in 1074ms, sequenceid=80, compaction requested=true 2024-11-28T07:21:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:52,125 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:52,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:52,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:52,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:21:52,126 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:52,130 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132414 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:52,130 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:21:52,131 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
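Once the flush above completes, each store (A, B, C) is marked for compaction and the ExploringCompactionPolicy selects 4 eligible HFiles per store; the "4 eligible, 16 blocking" figures correspond to the per-store file-count thresholds. A minimal sketch of how those thresholds would typically be set on a column family follows; the property keys are standard HBase settings, but the numeric values and the builder usage are illustrative rather than taken from this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionTuning {
        public static void main(String[] args) {
            // Per-family overrides of the store-file thresholds seen in the log:
            // at least "compaction.min" eligible files triggers a minor compaction,
            // and "blockingStoreFiles" (the "16 blocking" above) is where new
            // flushes start being held back.
            ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("A"))
                    .setConfiguration("hbase.hstore.compaction.min", "4")
                    .setConfiguration("hbase.hstore.compaction.max", "10")
                    .setConfiguration("hbase.hstore.blockingStoreFiles", "16")
                    .build();

            TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(cfA)
                    .build();

            System.out.println(desc);   // pass to Admin.createTable(desc) or modifyTable(desc)
        }
    }
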
2024-11-28T07:21:52,131 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=129.3 K 2024-11-28T07:21:52,131 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:52,131 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:21:52,131 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,131 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=46.9 K 2024-11-28T07:21:52,131 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,132 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471] 2024-11-28T07:21:52,132 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57f06e404bdc432db64bcdf96c7b25d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778508474 2024-11-28T07:21:52,133 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bbface044214dedaf7fa43dc80d5720, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778508474 2024-11-28T07:21:52,133 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e344ccd0af804bed811ba9e18abbebbc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732778508518 2024-11-28T07:21:52,134 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72d80f11c8e14fdcb045144bcf111510, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778509670 2024-11-28T07:21:52,134 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d90ee3de98e547468ae7860ea0103ac0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732778508518 2024-11-28T07:21:52,134 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73ffbd75c91f48fabbab1fe21942b96b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510734 2024-11-28T07:21:52,134 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aef7bac3f71485594d3446a40a18ff8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778509670 2024-11-28T07:21:52,135 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e4bd8e2563e34adb811e70bda22a9471, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510728 2024-11-28T07:21:52,163 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:52,171 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#149 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:52,173 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c523fb93c91a4eebbaff3aaed5edb651 is 50, key is test_row_0/B:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:52,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128ef07327309ec469ea162ad9fa8d2d7e9_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:52,189 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128ef07327309ec469ea162ad9fa8d2d7e9_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:52,190 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ef07327309ec469ea162ad9fa8d2d7e9_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:52,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:52,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:52,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:52,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741994_1170 (size=12139) 2024-11-28T07:21:52,245 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c523fb93c91a4eebbaff3aaed5edb651 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c523fb93c91a4eebbaff3aaed5edb651 2024-11-28T07:21:52,252 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into c523fb93c91a4eebbaff3aaed5edb651(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:52,252 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:52,252 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=12, startTime=1732778512125; duration=0sec 2024-11-28T07:21:52,252 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:52,252 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:21:52,253 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:52,255 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:52,257 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:21:52,257 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
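Column family A in this run is flushed and compacted through the MOB code path (the DefaultMobStoreFlusher / DefaultMobStoreCompactor entries and the mobdir/ paths above); when none of the cells being compacted exceed the MOB threshold, the compactor discards the MOB writer it pre-created ("Aborting writer ... because there are no MOB cells"). A minimal sketch of declaring such a MOB-enabled family follows; the threshold value is illustrative, not the one used by this test:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
        public static void main(String[] args) {
            // Cells larger than the MOB threshold are written to the mobdir/ area
            // seen in the log; cells at or below it stay in ordinary HFiles.
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)
                    .setMobThreshold(100L)   // bytes; illustrative value
                    .build();

            System.out.println(mobFamily);
        }
    }
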
2024-11-28T07:21:52,257 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=46.9 K 2024-11-28T07:21:52,258 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0205a93515449c2820de19e5da912da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778508474 2024-11-28T07:21:52,259 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa1dd51c5a7642ea863b571913ed6b69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732778508518 2024-11-28T07:21:52,259 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 680537bd8a8441e0a0cea606952ec3b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778509670 2024-11-28T07:21:52,260 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86b24263dd1548ae8306c0b29dee5d07, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510734 2024-11-28T07:21:52,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741995_1171 (size=4469) 2024-11-28T07:21:52,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e7bc8438bde64eb298775d14f8027a58_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778512196/Put/seqid=0 2024-11-28T07:21:52,278 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#148 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:52,280 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/fd10f151b2f849479d9e2dfc26a43a97 is 175, key is test_row_0/A:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:52,283 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#151 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:52,284 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/4cfd24ff791b4009b19bceb19ea49b76 is 50, key is test_row_0/C:col10/1732778510739/Put/seqid=0 2024-11-28T07:21:52,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778572282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778572286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741997_1173 (size=31093) 2024-11-28T07:21:52,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741996_1172 (size=17034) 2024-11-28T07:21:52,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741998_1174 (size=12139) 2024-11-28T07:21:52,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778572388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778572389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-28T07:21:52,567 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-28T07:21:52,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-28T07:21:52,570 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:52,571 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:52,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:52,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:52,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778572593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778572596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:52,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:52,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T07:21:52,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:52,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:52,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:52,743 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/fd10f151b2f849479d9e2dfc26a43a97 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97 2024-11-28T07:21:52,750 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:52,757 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into fd10f151b2f849479d9e2dfc26a43a97(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:52,757 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:52,757 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=12, startTime=1732778512125; duration=0sec 2024-11-28T07:21:52,757 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:52,759 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:21:52,759 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e7bc8438bde64eb298775d14f8027a58_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e7bc8438bde64eb298775d14f8027a58_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:52,761 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/093112858f164f849d7c76a8866fe868, store: 
[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:52,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/093112858f164f849d7c76a8866fe868 is 175, key is test_row_0/A:col10/1732778512196/Put/seqid=0 2024-11-28T07:21:52,769 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/4cfd24ff791b4009b19bceb19ea49b76 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4cfd24ff791b4009b19bceb19ea49b76 2024-11-28T07:21:52,775 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 4cfd24ff791b4009b19bceb19ea49b76(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:52,775 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:52,776 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=12, startTime=1732778512126; duration=0sec 2024-11-28T07:21:52,776 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:52,776 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:21:52,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741999_1175 (size=48139) 2024-11-28T07:21:52,793 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/093112858f164f849d7c76a8866fe868 2024-11-28T07:21:52,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/b1dff85571da47409448a5fc4f7bad3b is 50, key is test_row_0/B:col10/1732778512196/Put/seqid=0 2024-11-28T07:21:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742000_1176 (size=12001) 2024-11-28T07:21:52,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/b1dff85571da47409448a5fc4f7bad3b 2024-11-28T07:21:52,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b8b1446aa91d4b4ab11e4f787fbb08cf is 50, key is test_row_0/C:col10/1732778512196/Put/seqid=0 2024-11-28T07:21:52,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:52,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742001_1177 (size=12001) 2024-11-28T07:21:52,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:52,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T07:21:52,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:52,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:52,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:52,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:52,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:52,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b8b1446aa91d4b4ab11e4f787fbb08cf 2024-11-28T07:21:52,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/093112858f164f849d7c76a8866fe868 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868 2024-11-28T07:21:52,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868, entries=250, sequenceid=91, filesize=47.0 K 2024-11-28T07:21:52,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/b1dff85571da47409448a5fc4f7bad3b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b 2024-11-28T07:21:52,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778572901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T07:21:52,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778572903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:52,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b8b1446aa91d4b4ab11e4f787fbb08cf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf 2024-11-28T07:21:52,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T07:21:52,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8dbe93101666996632a420c7c97b42e1 in 711ms, sequenceid=91, compaction requested=false 2024-11-28T07:21:52,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:53,031 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:53,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T07:21:53,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:53,032 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:53,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:53,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807d9d81ebaef49bca0fd960d2b1f673e_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778512266/Put/seqid=0 2024-11-28T07:21:53,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742002_1178 (size=12154) 2024-11-28T07:21:53,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:53,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:53,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778573429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778573429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:53,525 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807d9d81ebaef49bca0fd960d2b1f673e_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807d9d81ebaef49bca0fd960d2b1f673e_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:53,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffadf363beb041fd8227576cfdf033ac, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:53,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffadf363beb041fd8227576cfdf033ac is 175, key is test_row_0/A:col10/1732778512266/Put/seqid=0 2024-11-28T07:21:53,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778573533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778573535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742003_1179 (size=30955) 2024-11-28T07:21:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:53,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778573738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778573739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778573820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,823 DEBUG [Thread-740 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:53,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778573834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,836 DEBUG [Thread-734 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:53,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778573837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:53,838 DEBUG [Thread-742 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:53,947 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffadf363beb041fd8227576cfdf033ac 2024-11-28T07:21:53,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/681a48eeb15845eeb813fc4b5d382fe2 is 50, key is test_row_0/B:col10/1732778512266/Put/seqid=0 2024-11-28T07:21:53,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742004_1180 (size=12001) 2024-11-28T07:21:54,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778574042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778574043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,367 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/681a48eeb15845eeb813fc4b5d382fe2 2024-11-28T07:21:54,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/391ebfd10278421f956567579ab8490c is 50, key is test_row_0/C:col10/1732778512266/Put/seqid=0 2024-11-28T07:21:54,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742005_1181 (size=12001) 2024-11-28T07:21:54,418 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/391ebfd10278421f956567579ab8490c 2024-11-28T07:21:54,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffadf363beb041fd8227576cfdf033ac as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac 2024-11-28T07:21:54,432 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac, entries=150, sequenceid=119, filesize=30.2 K 2024-11-28T07:21:54,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/681a48eeb15845eeb813fc4b5d382fe2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2 2024-11-28T07:21:54,440 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2, entries=150, sequenceid=119, filesize=11.7 K 2024-11-28T07:21:54,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/391ebfd10278421f956567579ab8490c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c 2024-11-28T07:21:54,446 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c, entries=150, sequenceid=119, filesize=11.7 K 2024-11-28T07:21:54,448 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8dbe93101666996632a420c7c97b42e1 in 1415ms, sequenceid=119, compaction requested=true 2024-11-28T07:21:54,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:54,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
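The repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting writes while the region's memstore sits above its blocking threshold (flush size times hbase.hregion.memstore.block.multiplier; the 512.0 K limit suggests this test runs with a deliberately small hbase.hregion.memstore.flush.size). On the client side the exception surfaces through RpcRetryingCallerImpl, which backs off and retries (tries=6, retries=16 in the entries above). A minimal client-side sketch of such a write, with illustrative retry values that are assumptions rather than this run's actual configuration:

```java
// Hedged sketch (not part of AcidGuaranteesTestTool): a single Put against the table in this
// log, with client retry knobs mirroring the "retries=16" behaviour seen above. Values are
// illustrative assumptions, not read from this run's configuration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetries {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // RegionTooBusyException is retried automatically by the retrying caller; these settings
    // only bound how many times and how quickly a blocked put is re-attempted.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base pause in ms between retries (backed off)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // may be rejected and retried while the region is over its memstore limit
    }
  }
}
```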
2024-11-28T07:21:54,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-28T07:21:54,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-28T07:21:54,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-28T07:21:54,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8780 sec 2024-11-28T07:21:54,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.8840 sec 2024-11-28T07:21:54,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:54,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:54,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128782218777b714540b1f5202af8d7812e_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742006_1182 (size=17184) 2024-11-28T07:21:54,621 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:54,627 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128782218777b714540b1f5202af8d7812e_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128782218777b714540b1f5202af8d7812e_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:54,628 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/bfdf3666f4ab4e8e86dd17e591495f9b, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/bfdf3666f4ab4e8e86dd17e591495f9b is 175, key is test_row_0/A:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778574630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778574631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742007_1183 (size=48289) 2024-11-28T07:21:54,646 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/bfdf3666f4ab4e8e86dd17e591495f9b 2024-11-28T07:21:54,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/7d93c5f22bff4b8dad0297dc92e59617 is 50, key is test_row_0/B:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T07:21:54,679 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-28T07:21:54,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:54,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-28T07:21:54,683 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:54,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T07:21:54,684 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:54,684 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
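The entries above show the jenkins client asking the master to flush TestAcidGuarantees: the master stores FlushTableProcedure pid=53, fans out FlushRegionProcedure pid=54 to the region server, and the client keeps checking whether the procedure is done (as it did for pid=51, reported completed just before). A minimal sketch of that client call, assuming a default connection setup:

```java
// Hedged sketch of the client side of the flush request logged above: Admin.flush() submits
// a FlushTableProcedure on the master and waits for it to finish.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush all regions of the table; the master tracks this as a procedure (pid) as above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```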
2024-11-28T07:21:54,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742008_1184 (size=12101) 2024-11-28T07:21:54,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/7d93c5f22bff4b8dad0297dc92e59617 2024-11-28T07:21:54,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/eb7662a8aed047a7887689c779e8c7bd is 50, key is test_row_0/C:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742009_1185 (size=12101) 2024-11-28T07:21:54,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/eb7662a8aed047a7887689c779e8c7bd 2024-11-28T07:21:54,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778574736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778574736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/bfdf3666f4ab4e8e86dd17e591495f9b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b 2024-11-28T07:21:54,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b, entries=250, sequenceid=132, filesize=47.2 K 2024-11-28T07:21:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/7d93c5f22bff4b8dad0297dc92e59617 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617 2024-11-28T07:21:54,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T07:21:54,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/eb7662a8aed047a7887689c779e8c7bd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd 2024-11-28T07:21:54,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T07:21:54,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 8dbe93101666996632a420c7c97b42e1 in 224ms, sequenceid=132, compaction requested=true 2024-11-28T07:21:54,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:54,775 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:54,777 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 158476 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:54,777 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:21:54,777 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:54,777 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=154.8 K 2024-11-28T07:21:54,777 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
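Family A's flushes and compactions in this log run through DefaultMobStoreFlusher and DefaultMobStoreCompactor because the family is MOB-enabled in this test, which is why its flushed data also lands under mobdir. A minimal sketch of declaring such a family, with an assumed table name and an assumed MOB threshold rather than the values this test uses:

```java
// Hedged sketch: creating a table with a MOB-enabled "A" family, the kind of family whose
// flushes/compactions go through DefaultMobStoreFlusher/DefaultMobStoreCompactor above.
// Table name and threshold below are hypothetical.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MobExampleTable")) // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)          // route large cells to MOB files under mobdir
              .setMobThreshold(100 * 1024)  // cells >= 100 KB stored as MOB (assumed value)
              .build())
          .build());
    }
  }
}
```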
2024-11-28T07:21:54,777 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b] 2024-11-28T07:21:54,778 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd10f151b2f849479d9e2dfc26a43a97, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510734 2024-11-28T07:21:54,778 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 093112858f164f849d7c76a8866fe868, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778511052 2024-11-28T07:21:54,779 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffadf363beb041fd8227576cfdf033ac, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732778512246 2024-11-28T07:21:54,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:54,779 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfdf3666f4ab4e8e86dd17e591495f9b, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513416 2024-11-28T07:21:54,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:54,781 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:54,783 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:54,783 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:21:54,783 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:54,783 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c523fb93c91a4eebbaff3aaed5edb651, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=47.1 K 2024-11-28T07:21:54,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T07:21:54,784 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c523fb93c91a4eebbaff3aaed5edb651, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510734 2024-11-28T07:21:54,785 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b1dff85571da47409448a5fc4f7bad3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778511052 2024-11-28T07:21:54,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:54,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:54,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:54,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:54,786 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 681a48eeb15845eeb813fc4b5d382fe2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732778512246 2024-11-28T07:21:54,787 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d93c5f22bff4b8dad0297dc92e59617, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513417 2024-11-28T07:21:54,801 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,810 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#161 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:54,811 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/290faf943bf841fcb5d0d3cffd79c97f is 50, key is test_row_0/B:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,819 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128f38f2a3a9dce442bbbda5c2d5c4f2dea_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,827 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128f38f2a3a9dce442bbbda5c2d5c4f2dea_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,827 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f38f2a3a9dce442bbbda5c2d5c4f2dea_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,836 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:54,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-28T07:21:54,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:54,837 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:21:54,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:54,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:54,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:54,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:54,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742010_1186 (size=12375) 2024-11-28T07:21:54,853 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/290faf943bf841fcb5d0d3cffd79c97f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/290faf943bf841fcb5d0d3cffd79c97f 2024-11-28T07:21:54,867 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into 290faf943bf841fcb5d0d3cffd79c97f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:54,867 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:54,867 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=12, startTime=1732778514780; duration=0sec 2024-11-28T07:21:54,867 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:54,867 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:21:54,867 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:21:54,870 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:21:54,870 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:21:54,870 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:54,871 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4cfd24ff791b4009b19bceb19ea49b76, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=47.1 K 2024-11-28T07:21:54,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cfd24ff791b4009b19bceb19ea49b76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732778510734 2024-11-28T07:21:54,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b8b1446aa91d4b4ab11e4f787fbb08cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778511052 2024-11-28T07:21:54,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 391ebfd10278421f956567579ab8490c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=119, earliestPutTs=1732778512246 2024-11-28T07:21:54,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112894762f6e382b434f88069b618bcc6ab7_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778514619/Put/seqid=0 2024-11-28T07:21:54,876 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting eb7662a8aed047a7887689c779e8c7bd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513417 2024-11-28T07:21:54,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742011_1187 (size=4469) 2024-11-28T07:21:54,890 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#160 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:54,891 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0547e7672ee049b4877ca8cf7f3f6979 is 175, key is test_row_0/A:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742012_1188 (size=12304) 2024-11-28T07:21:54,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:54,911 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#163 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:54,912 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/0583332f504d46e68968751b782e43da is 50, key is test_row_0/C:col10/1732778513423/Put/seqid=0 2024-11-28T07:21:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742013_1189 (size=31329) 2024-11-28T07:21:54,918 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112894762f6e382b434f88069b618bcc6ab7_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112894762f6e382b434f88069b618bcc6ab7_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:54,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/88340ba3c4fa415e9852157749acdd5c, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:54,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/88340ba3c4fa415e9852157749acdd5c is 175, key is test_row_0/A:col10/1732778514619/Put/seqid=0 2024-11-28T07:21:54,930 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0547e7672ee049b4877ca8cf7f3f6979 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979 2024-11-28T07:21:54,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742014_1190 (size=12375) 2024-11-28T07:21:54,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742015_1191 (size=31105) 2024-11-28T07:21:54,938 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/88340ba3c4fa415e9852157749acdd5c 2024-11-28T07:21:54,939 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) 
file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into 0547e7672ee049b4877ca8cf7f3f6979(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:54,939 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:54,939 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=12, startTime=1732778514775; duration=0sec 2024-11-28T07:21:54,939 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:54,939 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:21:54,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:54,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:54,949 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/0583332f504d46e68968751b782e43da as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/0583332f504d46e68968751b782e43da 2024-11-28T07:21:54,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/546aa3f3eb4942f48e997dd62391dcc8 is 50, key is test_row_0/B:col10/1732778514619/Put/seqid=0 2024-11-28T07:21:54,955 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 0583332f504d46e68968751b782e43da(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:54,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:54,955 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=12, startTime=1732778514785; duration=0sec 2024-11-28T07:21:54,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:54,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:21:54,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778574971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:54,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778574972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:54,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742016_1192 (size=12151) 2024-11-28T07:21:54,979 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/546aa3f3eb4942f48e997dd62391dcc8 2024-11-28T07:21:54,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T07:21:54,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/61fab40e9f50477881d5b64ae80e1614 is 50, key is test_row_0/C:col10/1732778514619/Put/seqid=0 2024-11-28T07:21:55,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742017_1193 (size=12151) 2024-11-28T07:21:55,030 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/61fab40e9f50477881d5b64ae80e1614 2024-11-28T07:21:55,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/88340ba3c4fa415e9852157749acdd5c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c 2024-11-28T07:21:55,042 INFO 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c, entries=150, sequenceid=156, filesize=30.4 K 2024-11-28T07:21:55,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/546aa3f3eb4942f48e997dd62391dcc8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8 2024-11-28T07:21:55,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,050 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8, entries=150, sequenceid=156, filesize=11.9 K 2024-11-28T07:21:55,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/61fab40e9f50477881d5b64ae80e1614 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614 2024-11-28T07:21:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,058 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614, entries=150, sequenceid=156, filesize=11.9 K 2024-11-28T07:21:55,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,059 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8dbe93101666996632a420c7c97b42e1 in 222ms, sequenceid=156, compaction requested=false 2024-11-28T07:21:55,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:55,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-28T07:21:55,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-28T07:21:55,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-28T07:21:55,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 379 msec 2024-11-28T07:21:55,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 388 msec 2024-11-28T07:21:55,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:55,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:55,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:55,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112846e3abdbb6924be19d81d170d9121383_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:55,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,149 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,157 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,166 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742018_1194 (size=12304) 2024-11-28T07:21:55,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,172 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,179 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112846e3abdbb6924be19d81d170d9121383_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112846e3abdbb6924be19d81d170d9121383_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:55,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,182 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0312c53223264132974991b2fb55ac6f, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0312c53223264132974991b2fb55ac6f is 175, key is test_row_0/A:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:55,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778575190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742019_1195 (size=31101) 2024-11-28T07:21:55,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778575193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,200 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0312c53223264132974991b2fb55ac6f 2024-11-28T07:21:55,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c5038059ae674a30b6207ee99d4ac738 is 50, key is test_row_0/B:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:55,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742020_1196 (size=9757) 2024-11-28T07:21:55,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c5038059ae674a30b6207ee99d4ac738 2024-11-28T07:21:55,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/194d2669f0da4af48c06fe04ef84de8d is 50, key is test_row_0/C:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:55,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742021_1197 (size=9757) 2024-11-28T07:21:55,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T07:21:55,286 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-28T07:21:55,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:55,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-28T07:21:55,289 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:55,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T07:21:55,290 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:55,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:55,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778575294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778575298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T07:21:55,441 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:55,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T07:21:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:55,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,443 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:55,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:55,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778575498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778575501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T07:21:55,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:55,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T07:21:55,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:55,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:55,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/194d2669f0da4af48c06fe04ef84de8d 2024-11-28T07:21:55,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/0312c53223264132974991b2fb55ac6f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f 2024-11-28T07:21:55,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f, entries=150, sequenceid=172, filesize=30.4 K 2024-11-28T07:21:55,663 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-28T07:21:55,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c5038059ae674a30b6207ee99d4ac738 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738 2024-11-28T07:21:55,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738, entries=100, sequenceid=172, filesize=9.5 K 2024-11-28T07:21:55,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/194d2669f0da4af48c06fe04ef84de8d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d 2024-11-28T07:21:55,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d, entries=100, sequenceid=172, filesize=9.5 K 2024-11-28T07:21:55,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8dbe93101666996632a420c7c97b42e1 in 580ms, sequenceid=172, compaction requested=true 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:55,698 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:55,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:55,698 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:55,700 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:21:55,700 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/290faf943bf841fcb5d0d3cffd79c97f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=33.5 K 2024-11-28T07:21:55,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=91.3 K 2024-11-28T07:21:55,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f] 2024-11-28T07:21:55,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 290faf943bf841fcb5d0d3cffd79c97f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513417 2024-11-28T07:21:55,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0547e7672ee049b4877ca8cf7f3f6979, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513417 2024-11-28T07:21:55,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 546aa3f3eb4942f48e997dd62391dcc8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732778514616 2024-11-28T07:21:55,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88340ba3c4fa415e9852157749acdd5c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732778514616 2024-11-28T07:21:55,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c5038059ae674a30b6207ee99d4ac738, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514971 2024-11-28T07:21:55,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0312c53223264132974991b2fb55ac6f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514969 2024-11-28T07:21:55,714 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#169 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:55,715 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/cd971456d5dc4a2eaeec7177982cf05a is 50, key is test_row_0/B:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:55,716 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,736 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128fe47f9d485934a5e91c95295a554b919_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,739 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128fe47f9d485934a5e91c95295a554b919_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,739 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128fe47f9d485934a5e91c95295a554b919_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:55,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T07:21:55,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:55,750 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:55,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:55,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742022_1198 (size=12527) 2024-11-28T07:21:55,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128befb5c1eedba453e95b0554a03057056_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778515183/Put/seqid=0 2024-11-28T07:21:55,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742023_1199 (size=4469) 2024-11-28T07:21:55,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742024_1200 (size=12304) 2024-11-28T07:21:55,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:55,779 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128befb5c1eedba453e95b0554a03057056_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128befb5c1eedba453e95b0554a03057056_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:55,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ae495b8a9beb40d1a216fd0b9eea9660, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:55,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ae495b8a9beb40d1a216fd0b9eea9660 is 175, key is test_row_0/A:col10/1732778515183/Put/seqid=0 2024-11-28T07:21:55,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742025_1201 (size=31105) 2024-11-28T07:21:55,802 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=195, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ae495b8a9beb40d1a216fd0b9eea9660 2024-11-28T07:21:55,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:55,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c831909b64874d4f892ccbbc3e3011ba is 50, key is test_row_0/B:col10/1732778515183/Put/seqid=0 2024-11-28T07:21:55,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778575829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778575830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742026_1202 (size=12151) 2024-11-28T07:21:55,847 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c831909b64874d4f892ccbbc3e3011ba 2024-11-28T07:21:55,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3d58c198624c4a1c9cd5416d90fb4e6d is 50, key is test_row_0/C:col10/1732778515183/Put/seqid=0 2024-11-28T07:21:55,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742027_1203 (size=12151) 2024-11-28T07:21:55,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T07:21:55,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778575936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:55,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778575937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778576140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778576140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,170 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/cd971456d5dc4a2eaeec7177982cf05a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cd971456d5dc4a2eaeec7177982cf05a 2024-11-28T07:21:56,172 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#170 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:56,173 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/08e5ac5c951b46bc8dc645f4d09e540c is 175, key is test_row_0/A:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:56,179 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into cd971456d5dc4a2eaeec7177982cf05a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:56,179 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,179 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778515698; duration=0sec 2024-11-28T07:21:56,179 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:56,179 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:21:56,179 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:56,181 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:56,181 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:21:56,181 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,181 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/0583332f504d46e68968751b782e43da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=33.5 K 2024-11-28T07:21:56,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0583332f504d46e68968751b782e43da, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732778513417 2024-11-28T07:21:56,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 61fab40e9f50477881d5b64ae80e1614, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732778514616 2024-11-28T07:21:56,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 194d2669f0da4af48c06fe04ef84de8d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514971 2024-11-28T07:21:56,207 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8dbe93101666996632a420c7c97b42e1#C#compaction#174 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:56,208 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/62e361fc888f46b4865bbc2e2ba9dae2 is 50, key is test_row_0/C:col10/1732778515117/Put/seqid=0 2024-11-28T07:21:56,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742028_1204 (size=31588) 2024-11-28T07:21:56,223 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/08e5ac5c951b46bc8dc645f4d09e540c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c 2024-11-28T07:21:56,234 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into 08e5ac5c951b46bc8dc645f4d09e540c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:56,234 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,234 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778515698; duration=0sec 2024-11-28T07:21:56,235 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:56,235 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:21:56,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742029_1205 (size=12527) 2024-11-28T07:21:56,264 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/62e361fc888f46b4865bbc2e2ba9dae2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/62e361fc888f46b4865bbc2e2ba9dae2 2024-11-28T07:21:56,271 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 62e361fc888f46b4865bbc2e2ba9dae2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:21:56,271 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,271 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778515698; duration=0sec 2024-11-28T07:21:56,271 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:56,271 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:21:56,275 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3d58c198624c4a1c9cd5416d90fb4e6d 2024-11-28T07:21:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ae495b8a9beb40d1a216fd0b9eea9660 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660 2024-11-28T07:21:56,297 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660, entries=150, sequenceid=195, filesize=30.4 K 2024-11-28T07:21:56,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c831909b64874d4f892ccbbc3e3011ba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba 2024-11-28T07:21:56,307 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T07:21:56,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3d58c198624c4a1c9cd5416d90fb4e6d as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d 2024-11-28T07:21:56,315 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T07:21:56,317 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 8dbe93101666996632a420c7c97b42e1 in 568ms, sequenceid=195, compaction requested=false 2024-11-28T07:21:56,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-28T07:21:56,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-28T07:21:56,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-28T07:21:56,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0280 sec 2024-11-28T07:21:56,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.0340 sec 2024-11-28T07:21:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T07:21:56,393 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-28T07:21:56,396 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:21:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-28T07:21:56,398 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:21:56,398 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:21:56,398 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:21:56,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:56,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:56,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:21:56,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:56,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:56,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:56,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112803dd37a897004a53946931aa2f6f3315_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742030_1206 (size=12304) 2024-11-28T07:21:56,489 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:56,495 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112803dd37a897004a53946931aa2f6f3315_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112803dd37a897004a53946931aa2f6f3315_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:56,497 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/aea682038f20403485b1534bcddaa821, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778576490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/aea682038f20403485b1534bcddaa821 is 175, key is test_row_0/A:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778576494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:56,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742031_1207 (size=31105) 2024-11-28T07:21:56,511 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/aea682038f20403485b1534bcddaa821 2024-11-28T07:21:56,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/02acc0f83cba465ba39fe5a0acba67e5 is 50, key is test_row_0/B:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742032_1208 (size=12151) 2024-11-28T07:21:56,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/02acc0f83cba465ba39fe5a0acba67e5 2024-11-28T07:21:56,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/feef6bb144a0496ba183d0091fe63f57 is 50, key is test_row_0/C:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,551 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:56,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:56,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
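The RegionTooBusyException warnings above are the region server pushing back on writers while the region is over its memstore blocking limit; callers are expected to back off and retry. As a minimal illustrative sketch only (class name, retry bounds, and connection setup are assumptions, not taken from this test, and in practice the HBase client usually absorbs these retries internally):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put); // the client normally retries internally as well
              break;
            } catch (RegionTooBusyException e) {
              // Region is over its memstore blocking limit; back off and retry.
              if (++attempts > 5) {
                throw e;
              }
              Thread.sleep(100L * attempts);
            }
          }
        }
      }
    }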
2024-11-28T07:21:56,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:56,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:56,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
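The FlushTableProcedure entries (pid=55, pid=57) and the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed" line correspond to administrative flush requests issued by the test client. Requesting such a flush from application code looks roughly like the sketch below; the class name and connection setup are assumed, only the table name comes from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The master turns this into a FlushTableProcedure with one
          // FlushRegionProcedure subprocedure per region, as seen in the
          // procedure2.ProcedureExecutor entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }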
2024-11-28T07:21:56,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742033_1209 (size=12151) 2024-11-28T07:21:56,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/feef6bb144a0496ba183d0091fe63f57 2024-11-28T07:21:56,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/aea682038f20403485b1534bcddaa821 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821 2024-11-28T07:21:56,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821, entries=150, sequenceid=213, filesize=30.4 K 2024-11-28T07:21:56,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/02acc0f83cba465ba39fe5a0acba67e5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5 2024-11-28T07:21:56,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5, entries=150, sequenceid=213, filesize=11.9 K 2024-11-28T07:21:56,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/feef6bb144a0496ba183d0091fe63f57 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57 2024-11-28T07:21:56,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57, entries=150, sequenceid=213, filesize=11.9 K 2024-11-28T07:21:56,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 8dbe93101666996632a420c7c97b42e1 in 142ms, sequenceid=213, compaction requested=true 2024-11-28T07:21:56,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:56,589 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:56,589 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:56,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:56,591 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:56,591 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93798 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:56,591 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:21:56,591 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:21:56,591 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,591 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
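The SortedCompactionPolicy/ExploringCompactionPolicy entries above are system-selected minor compactions triggered by the flushes. For comparison, a compaction can also be requested explicitly through the Admin API; a hedged sketch under assumed connection setup (only the table and family names are reused from this test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction for one column family, or a major compaction
          // for the whole table; the region server's CompactSplit threads pick it up.
          admin.compact(table, Bytes.toBytes("A"));
          admin.majorCompact(table);
        }
      }
    }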
2024-11-28T07:21:56,591 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=91.6 K 2024-11-28T07:21:56,591 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cd971456d5dc4a2eaeec7177982cf05a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.0 K 2024-11-28T07:21:56,591 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,591 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821] 2024-11-28T07:21:56,592 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cd971456d5dc4a2eaeec7177982cf05a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514630 2024-11-28T07:21:56,592 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08e5ac5c951b46bc8dc645f4d09e540c, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514630 2024-11-28T07:21:56,592 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c831909b64874d4f892ccbbc3e3011ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778515181 2024-11-28T07:21:56,593 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae495b8a9beb40d1a216fd0b9eea9660, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778515181 2024-11-28T07:21:56,593 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 02acc0f83cba465ba39fe5a0acba67e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:56,594 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting aea682038f20403485b1534bcddaa821, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:56,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:56,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:56,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:56,605 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8dbe93101666996632a420c7c97b42e1#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:56,606 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/69af8e2ebfc7413998c75260942c189e is 50, key is test_row_0/B:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,607 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,616 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411280b6a5d8d2c494d6a8ce1941f2dc680eb_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,619 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411280b6a5d8d2c494d6a8ce1941f2dc680eb_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,619 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280b6a5d8d2c494d6a8ce1941f2dc680eb_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289d8431e930f340b692313b72dee40dcd_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778516481/Put/seqid=0 2024-11-28T07:21:56,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778576634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778576635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742034_1210 (size=12629) 2024-11-28T07:21:56,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742035_1211 (size=4469) 2024-11-28T07:21:56,663 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#179 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:56,663 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffa322011e0842b8997c9e8779ac67c8 is 175, key is test_row_0/A:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,668 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/69af8e2ebfc7413998c75260942c189e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/69af8e2ebfc7413998c75260942c189e 2024-11-28T07:21:56,677 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into 69af8e2ebfc7413998c75260942c189e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:56,677 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,677 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778516589; duration=0sec 2024-11-28T07:21:56,678 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:56,678 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:21:56,678 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:56,679 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:56,679 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:21:56,679 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:56,680 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/62e361fc888f46b4865bbc2e2ba9dae2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.0 K 2024-11-28T07:21:56,681 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 62e361fc888f46b4865bbc2e2ba9dae2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732778514630 2024-11-28T07:21:56,682 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d58c198624c4a1c9cd5416d90fb4e6d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732778515181 2024-11-28T07:21:56,682 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting feef6bb144a0496ba183d0091fe63f57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:56,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742036_1212 (size=14794) 2024-11-28T07:21:56,690 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:56,695 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289d8431e930f340b692313b72dee40dcd_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289d8431e930f340b692313b72dee40dcd_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:56,696 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ba0bede42de74697b0bf6eda5c6765cf, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:56,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ba0bede42de74697b0bf6eda5c6765cf is 175, key is test_row_0/A:col10/1732778516481/Put/seqid=0 2024-11-28T07:21:56,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:56,703 
DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:56,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:56,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:56,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:56,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,708 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#181 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:56,708 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f31880a9d7a94ce7bfc25a89190fd974 is 50, key is test_row_0/C:col10/1732778516444/Put/seqid=0 2024-11-28T07:21:56,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742037_1213 (size=31583) 2024-11-28T07:21:56,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742038_1214 (size=39749) 2024-11-28T07:21:56,721 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ba0bede42de74697b0bf6eda5c6765cf 2024-11-28T07:21:56,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742039_1215 (size=12629) 2024-11-28T07:21:56,731 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f31880a9d7a94ce7bfc25a89190fd974 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f31880a9d7a94ce7bfc25a89190fd974 2024-11-28T07:21:56,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a5eea05d18e44245a50c99d54bdfd89b is 50, key is test_row_0/B:col10/1732778516481/Put/seqid=0 2024-11-28T07:21:56,737 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into f31880a9d7a94ce7bfc25a89190fd974(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
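The "Over memstore limit=512.0 K" pushback seen throughout this section comes from the region's blocking memstore size, which is the configured flush size multiplied by the block multiplier; the test presumably sets a very small flush size to exercise this path. A hedged configuration sketch with illustrative (non-test) values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a memstore to disk once it reaches this size (default 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Block new updates once the memstore reaches flush.size * multiplier;
        // that is when RegionTooBusyException ("Over memstore limit=...") is thrown.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit + " bytes");
      }
    }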
2024-11-28T07:21:56,737 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:56,737 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778516589; duration=0sec 2024-11-28T07:21:56,737 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:56,737 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:21:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742040_1216 (size=12151) 2024-11-28T07:21:56,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778576741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778576743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:56,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:56,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:56,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:56,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:56,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778576946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:56,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:56,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778576946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:57,014 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,117 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ffa322011e0842b8997c9e8779ac67c8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8 2024-11-28T07:21:57,124 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into ffa322011e0842b8997c9e8779ac67c8(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:57,124 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:57,124 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778516589; duration=0sec 2024-11-28T07:21:57,124 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:57,124 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:21:57,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a5eea05d18e44245a50c99d54bdfd89b 2024-11-28T07:21:57,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3b09476166bd46aea1c476c30cd504c7 is 50, key is test_row_0/C:col10/1732778516481/Put/seqid=0 2024-11-28T07:21:57,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:57,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742041_1217 (size=12151) 2024-11-28T07:21:57,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3b09476166bd46aea1c476c30cd504c7 2024-11-28T07:21:57,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/ba0bede42de74697b0bf6eda5c6765cf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf 2024-11-28T07:21:57,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf, entries=200, sequenceid=235, filesize=38.8 K 2024-11-28T07:21:57,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a5eea05d18e44245a50c99d54bdfd89b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b 2024-11-28T07:21:57,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b, entries=150, sequenceid=235, filesize=11.9 K 2024-11-28T07:21:57,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3b09476166bd46aea1c476c30cd504c7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7 2024-11-28T07:21:57,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7, entries=150, sequenceid=235, filesize=11.9 K 2024-11-28T07:21:57,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 8dbe93101666996632a420c7c97b42e1 in 602ms, sequenceid=235, compaction requested=false 2024-11-28T07:21:57,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:57,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 
2024-11-28T07:21:57,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T07:21:57,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:57,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:57,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:57,251 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:57,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:57,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:57,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b5f8310c32934cb08449a58e4e5e49da_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:57,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742042_1218 (size=12304) 2024-11-28T07:21:57,277 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:57,284 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b5f8310c32934cb08449a58e4e5e49da_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5f8310c32934cb08449a58e4e5e49da_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:57,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778577286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778577289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,293 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/dc6c75b2f4724f6799271566bc95af58, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:57,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/dc6c75b2f4724f6799271566bc95af58 is 175, key is test_row_0/A:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:57,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742043_1219 (size=31105) 2024-11-28T07:21:57,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778577390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778577390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:21:57,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:57,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778577592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778577593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,715 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/dc6c75b2f4724f6799271566bc95af58 2024-11-28T07:21:57,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a0af38e4d0724dae95db1bdf8d2f4677 is 50, key is test_row_0/B:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:57,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742044_1220 (size=12151) 2024-11-28T07:21:57,781 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:57,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778577857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,858 DEBUG [Thread-740 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:57,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778577870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,873 DEBUG [Thread-734 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:57,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778577872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,874 DEBUG [Thread-742 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:21:57,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778577896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778577898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:57,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:57,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:57,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:57,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:57,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:57,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:58,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:21:58,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:58,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:58,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:58,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:21:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:21:58,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a0af38e4d0724dae95db1bdf8d2f4677 2024-11-28T07:21:58,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3e0a28f000ec4d5aabb044a92cf719d3 is 50, key is test_row_0/C:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:58,154 INFO [master/592d8b721726:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-28T07:21:58,154 INFO [master/592d8b721726:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-28T07:21:58,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742045_1221 (size=12151) 2024-11-28T07:21:58,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3e0a28f000ec4d5aabb044a92cf719d3 2024-11-28T07:21:58,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/dc6c75b2f4724f6799271566bc95af58 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58 2024-11-28T07:21:58,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58, entries=150, sequenceid=254, filesize=30.4 K 2024-11-28T07:21:58,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a0af38e4d0724dae95db1bdf8d2f4677 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677 2024-11-28T07:21:58,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T07:21:58,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/3e0a28f000ec4d5aabb044a92cf719d3 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3 2024-11-28T07:21:58,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T07:21:58,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 8dbe93101666996632a420c7c97b42e1 in 927ms, sequenceid=254, compaction requested=true 2024-11-28T07:21:58,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:58,179 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:21:58,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:58,179 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:58,180 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:58,180 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:21:58,180 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
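Right after the flush above completes with "compaction requested=true", the region server selects minor compactions for stores A, B and C of region 8dbe93101666996632a420c7c97b42e1. Compactions can also be requested explicitly from a client; the following is a hedged sketch against the public Admin API (only the table name is taken from the log, everything else is assumed), not a reproduction of what the test itself does.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Queue a compaction request for the table; the region server's
            // compaction policy still decides which store files actually get merged.
            admin.compact(table);

            // Poll until the server reports no compaction in progress for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1000);
            }
        }
    }
}
```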
2024-11-28T07:21:58,180 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:58,181 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:21:58,181 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=100.0 K 2024-11-28T07:21:58,181 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,181 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,181 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58] 2024-11-28T07:21:58,181 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/69af8e2ebfc7413998c75260942c189e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.1 K 2024-11-28T07:21:58,181 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ffa322011e0842b8997c9e8779ac67c8, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:58,181 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69af8e2ebfc7413998c75260942c189e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:58,182 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5eea05d18e44245a50c99d54bdfd89b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732778516481 2024-11-28T07:21:58,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ba0bede42de74697b0bf6eda5c6765cf, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732778516477 2024-11-28T07:21:58,182 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0af38e4d0724dae95db1bdf8d2f4677, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:21:58,182 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting dc6c75b2f4724f6799271566bc95af58, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:21:58,191 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:58,193 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#188 average throughput is 6.55 MB/second, slept 0 time(s) 
and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:58,194 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/16c5f124ab824cf0b1f2a4f07fb8b881 is 50, key is test_row_0/B:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:58,197 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411284cbb7919d2e94b35b1b4ff9beffcc9b9_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:58,199 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411284cbb7919d2e94b35b1b4ff9beffcc9b9_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:58,199 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284cbb7919d2e94b35b1b4ff9beffcc9b9_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:58,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742047_1223 (size=4469) 2024-11-28T07:21:58,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742046_1222 (size=12731) 2024-11-28T07:21:58,219 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/16c5f124ab824cf0b1f2a4f07fb8b881 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/16c5f124ab824cf0b1f2a4f07fb8b881 2024-11-28T07:21:58,223 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into 16c5f124ab824cf0b1f2a4f07fb8b881(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
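The DefaultMobStoreFlusher and DefaultMobStoreCompactor messages in this run indicate that column family A is MOB-enabled (the MOB writer above is aborted only because this particular compaction produced no MOB cells). For context, a MOB column family is declared at table-creation time roughly as sketched below; the threshold value and the single-family table layout are assumptions for illustration, not the test's actual schema.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Values larger than the MOB threshold are written to separate MOB files,
            // which is why the log shows mobdir/.tmp writers during flush and compaction.
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100L)   // bytes; illustrative value only
                .build();

            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build());
        }
    }
}
```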
2024-11-28T07:21:58,223 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:58,223 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778518179; duration=0sec 2024-11-28T07:21:58,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:21:58,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:21:58,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:21:58,226 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:21:58,226 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:21:58,226 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,226 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f31880a9d7a94ce7bfc25a89190fd974, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.1 K 2024-11-28T07:21:58,227 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f31880a9d7a94ce7bfc25a89190fd974, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778515824 2024-11-28T07:21:58,227 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b09476166bd46aea1c476c30cd504c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732778516481 2024-11-28T07:21:58,227 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e0a28f000ec4d5aabb044a92cf719d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:21:58,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
592d8b721726,33143,1732778474488 2024-11-28T07:21:58,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T07:21:58,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:21:58,241 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T07:21:58,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:58,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:58,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:58,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:58,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:58,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:58,243 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#189 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:58,244 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/89ece376586d48b78e45878dade5f32e is 50, key is test_row_0/C:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:58,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c3af4cb6b4ec4dc39dfdadc2744adaf0_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778517287/Put/seqid=0 2024-11-28T07:21:58,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742048_1224 (size=12731) 2024-11-28T07:21:58,280 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/89ece376586d48b78e45878dade5f32e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/89ece376586d48b78e45878dade5f32e 2024-11-28T07:21:58,286 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 89ece376586d48b78e45878dade5f32e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:21:58,286 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:58,286 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778518179; duration=0sec 2024-11-28T07:21:58,286 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:58,286 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:21:58,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742049_1225 (size=12454) 2024-11-28T07:21:58,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:58,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:21:58,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778578421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778578422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:21:58,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778578523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778578526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,613 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#187 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:21:58,614 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/f5626bbd2ba14622b187bae81efffb56 is 175, key is test_row_0/A:col10/1732778516633/Put/seqid=0 2024-11-28T07:21:58,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742050_1226 (size=31685) 2024-11-28T07:21:58,634 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/f5626bbd2ba14622b187bae81efffb56 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56 2024-11-28T07:21:58,646 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into f5626bbd2ba14622b187bae81efffb56(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
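The repeated RegionTooBusyException warnings in this stretch of the log mean that puts against region 8dbe93101666996632a420c7c97b42e1 are being rejected because its memstore has reached the blocking limit (512.0 K here) while flushes catch up. The HBase client normally absorbs these with its internal retry policy; the sketch below shows an explicit backoff loop around a single put purely for illustration (row, family and qualifier are taken from the log keys; the value and retry parameters are assumptions).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is blocking updates until its memstore drains;
                    // back off and retry instead of hammering the same region server.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```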
2024-11-28T07:21:58,646 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:58,646 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778518179; duration=0sec 2024-11-28T07:21:58,647 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:21:58,647 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:21:58,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:58,713 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c3af4cb6b4ec4dc39dfdadc2744adaf0_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c3af4cb6b4ec4dc39dfdadc2744adaf0_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:58,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5c3ae25bd8174b099568950d3c15f602, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:58,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5c3ae25bd8174b099568950d3c15f602 is 175, key is test_row_0/A:col10/1732778517287/Put/seqid=0 2024-11-28T07:21:58,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778578726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:58,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778578730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:58,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742051_1227 (size=31255) 2024-11-28T07:21:58,751 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5c3ae25bd8174b099568950d3c15f602 2024-11-28T07:21:58,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/1fcc6b8e31a1417386397d69967b3b8e is 50, key is test_row_0/B:col10/1732778517287/Put/seqid=0 2024-11-28T07:21:58,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742052_1228 (size=12301) 2024-11-28T07:21:59,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778579030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778579034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,177 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/1fcc6b8e31a1417386397d69967b3b8e 2024-11-28T07:21:59,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b9181907be384567bfdbda2c1016a6a3 is 50, key is test_row_0/C:col10/1732778517287/Put/seqid=0 2024-11-28T07:21:59,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742053_1229 (size=12301) 2024-11-28T07:21:59,219 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b9181907be384567bfdbda2c1016a6a3 2024-11-28T07:21:59,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5c3ae25bd8174b099568950d3c15f602 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602 2024-11-28T07:21:59,234 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602, entries=150, sequenceid=275, filesize=30.5 K 2024-11-28T07:21:59,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/1fcc6b8e31a1417386397d69967b3b8e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e 2024-11-28T07:21:59,242 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e, entries=150, sequenceid=275, filesize=12.0 K 2024-11-28T07:21:59,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b9181907be384567bfdbda2c1016a6a3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3 2024-11-28T07:21:59,250 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3, entries=150, sequenceid=275, filesize=12.0 K 2024-11-28T07:21:59,251 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 8dbe93101666996632a420c7c97b42e1 in 1010ms, sequenceid=275, compaction requested=false 2024-11-28T07:21:59,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:21:59,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
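The flush that finishes above drains roughly 114 KB from the memstore and lets the region drop back below the blocking limit that was producing the RegionTooBusyException responses. That limit is derived from two standard settings, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the concrete numbers in the sketch below are chosen only to match the 512.0 K figure seen in the log and are not the values this test actually configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreSizingExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region memstore flush threshold in bytes. The run above appears to use
        // a very small value so flushes happen quickly; 128 KB is illustrative only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are blocked (RegionTooBusyException) once the memstore reaches
        // flush.size * block.multiplier; 128 KB * 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages in the log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit (bytes): " + blockingLimit);
    }
}
```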
2024-11-28T07:21:59,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-28T07:21:59,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-28T07:21:59,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-28T07:21:59,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8540 sec 2024-11-28T07:21:59,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.8580 sec 2024-11-28T07:21:59,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:59,538 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T07:21:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:21:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:21:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:59,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:21:59,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:21:59,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286ca4a582550449d2ad8f7778ef4cbc9c_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778519538/Put/seqid=0 2024-11-28T07:21:59,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742054_1230 (size=12454) 2024-11-28T07:21:59,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778579574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778579575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778579677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778579677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:21:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778579880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778579878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:21:59,960 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:21:59,966 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286ca4a582550449d2ad8f7778ef4cbc9c_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ca4a582550449d2ad8f7778ef4cbc9c_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:21:59,967 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/37982bfa7c23484baab4afc07a2c861d, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:21:59,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/37982bfa7c23484baab4afc07a2c861d is 175, key is test_row_0/A:col10/1732778519538/Put/seqid=0 2024-11-28T07:21:59,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742055_1231 (size=31255) 2024-11-28T07:21:59,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/37982bfa7c23484baab4afc07a2c861d 2024-11-28T07:22:00,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/9dbb1fee18c3476abd05bbf4fff0fef5 is 50, key is test_row_0/B:col10/1732778519538/Put/seqid=0 2024-11-28T07:22:00,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742056_1232 (size=12301) 2024-11-28T07:22:00,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:00,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778580181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778580181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:00,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/9dbb1fee18c3476abd05bbf4fff0fef5 2024-11-28T07:22:00,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/da583e07fc854b9895de4b4e1e97fc7d is 50, key is test_row_0/C:col10/1732778519538/Put/seqid=0 2024-11-28T07:22:00,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742057_1233 (size=12301) 2024-11-28T07:22:00,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T07:22:00,504 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-28T07:22:00,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:00,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-28T07:22:00,508 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:00,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:00,509 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=59, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:00,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:00,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:00,661 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:00,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-28T07:22:00,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:00,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:00,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:00,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:00,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778580684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:00,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:00,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778580688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:00,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:00,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:00,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-28T07:22:00,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:00,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:00,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:00,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:00,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/da583e07fc854b9895de4b4e1e97fc7d 2024-11-28T07:22:00,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/37982bfa7c23484baab4afc07a2c861d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d 2024-11-28T07:22:00,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d, entries=150, sequenceid=295, filesize=30.5 K 2024-11-28T07:22:00,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/9dbb1fee18c3476abd05bbf4fff0fef5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5 2024-11-28T07:22:00,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5, entries=150, sequenceid=295, filesize=12.0 K 2024-11-28T07:22:00,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/da583e07fc854b9895de4b4e1e97fc7d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d 2024-11-28T07:22:00,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d, entries=150, sequenceid=295, filesize=12.0 K 2024-11-28T07:22:00,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 8dbe93101666996632a420c7c97b42e1 in 1324ms, sequenceid=295, compaction requested=true 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:00,862 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:00,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:00,863 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:00,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:00,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:22:00,864 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:00,864 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:00,864 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=92.0 K 2024-11-28T07:22:00,864 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:22:00,864 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,864 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d] 2024-11-28T07:22:00,864 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/16c5f124ab824cf0b1f2a4f07fb8b881, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.5 K 2024-11-28T07:22:00,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5626bbd2ba14622b187bae81efffb56, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:22:00,865 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 16c5f124ab824cf0b1f2a4f07fb8b881, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:22:00,866 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c3ae25bd8174b099568950d3c15f602, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778517259 2024-11-28T07:22:00,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fcc6b8e31a1417386397d69967b3b8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778517259 2024-11-28T07:22:00,866 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37982bfa7c23484baab4afc07a2c861d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:00,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dbb1fee18c3476abd05bbf4fff0fef5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:00,883 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:00,886 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#197 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:00,886 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/de0a4383f4ca4cab91c5a2f6e3d310a1 is 50, key is test_row_0/B:col10/1732778519538/Put/seqid=0 2024-11-28T07:22:00,903 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411288b54938c7dec40cca122429f7229dc1d_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:00,905 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411288b54938c7dec40cca122429f7229dc1d_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:00,905 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288b54938c7dec40cca122429f7229dc1d_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:00,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742059_1235 (size=4469) 2024-11-28T07:22:00,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742058_1234 (size=12983) 2024-11-28T07:22:00,937 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#196 average throughput is 0.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:00,938 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/717887b0a23d4068b2e2f21fdba1ef54 is 175, key is test_row_0/A:col10/1732778519538/Put/seqid=0 2024-11-28T07:22:00,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742060_1236 (size=31937) 2024-11-28T07:22:00,955 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/717887b0a23d4068b2e2f21fdba1ef54 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54 2024-11-28T07:22:00,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:00,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-28T07:22:00,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:00,969 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-28T07:22:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:00,969 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into 717887b0a23d4068b2e2f21fdba1ef54(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:00,969 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:00,969 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778520862; duration=0sec 2024-11-28T07:22:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:00,969 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:00,969 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:22:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:00,969 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:00,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:00,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:00,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:00,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:22:00,971 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:00,971 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/89ece376586d48b78e45878dade5f32e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.5 K 2024-11-28T07:22:00,972 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89ece376586d48b78e45878dade5f32e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778516627 2024-11-28T07:22:00,974 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9181907be384567bfdbda2c1016a6a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778517259 2024-11-28T07:22:00,975 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting da583e07fc854b9895de4b4e1e97fc7d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:00,997 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#199 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:00,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f36b14e9d0424905a3643ef60d77e723_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778519567/Put/seqid=0 2024-11-28T07:22:00,998 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/22dea93b3bc04d7fbac37ed24ece5ce0 is 50, key is test_row_0/C:col10/1732778519538/Put/seqid=0 2024-11-28T07:22:01,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742061_1237 (size=12983) 2024-11-28T07:22:01,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742062_1238 (size=12454) 2024-11-28T07:22:01,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:01,013 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f36b14e9d0424905a3643ef60d77e723_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f36b14e9d0424905a3643ef60d77e723_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:01,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/acd0b3284cd44e1aa1fa8ec45f23fb78, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:01,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/acd0b3284cd44e1aa1fa8ec45f23fb78 is 175, key is test_row_0/A:col10/1732778519567/Put/seqid=0 2024-11-28T07:22:01,018 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/22dea93b3bc04d7fbac37ed24ece5ce0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/22dea93b3bc04d7fbac37ed24ece5ce0 2024-11-28T07:22:01,027 INFO 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 22dea93b3bc04d7fbac37ed24ece5ce0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:01,027 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:01,027 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778520862; duration=0sec 2024-11-28T07:22:01,027 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:01,027 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:22:01,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742063_1239 (size=31255) 2024-11-28T07:22:01,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:01,346 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/de0a4383f4ca4cab91c5a2f6e3d310a1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/de0a4383f4ca4cab91c5a2f6e3d310a1 2024-11-28T07:22:01,352 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into de0a4383f4ca4cab91c5a2f6e3d310a1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:01,352 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:01,352 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778520862; duration=0sec 2024-11-28T07:22:01,352 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:01,352 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:22:01,455 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/acd0b3284cd44e1aa1fa8ec45f23fb78 2024-11-28T07:22:01,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/15615ed985e24d77ac151121716d1aa8 is 50, key is test_row_0/B:col10/1732778519567/Put/seqid=0 2024-11-28T07:22:01,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742064_1240 (size=12301) 2024-11-28T07:22:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:01,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778581716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:01,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778581717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:01,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:01,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778581819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:01,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:01,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778581819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:01,869 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/15615ed985e24d77ac151121716d1aa8 2024-11-28T07:22:01,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/c1d5f8eb8fcb4769a9d29dd875b20d40 is 50, key is test_row_0/C:col10/1732778519567/Put/seqid=0 2024-11-28T07:22:01,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742065_1241 (size=12301) 2024-11-28T07:22:01,885 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/c1d5f8eb8fcb4769a9d29dd875b20d40 2024-11-28T07:22:01,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/acd0b3284cd44e1aa1fa8ec45f23fb78 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78 2024-11-28T07:22:01,899 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78, entries=150, sequenceid=314, filesize=30.5 K 2024-11-28T07:22:01,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/15615ed985e24d77ac151121716d1aa8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8 2024-11-28T07:22:01,906 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8, entries=150, sequenceid=314, filesize=12.0 K 2024-11-28T07:22:01,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/c1d5f8eb8fcb4769a9d29dd875b20d40 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40 2024-11-28T07:22:01,914 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40, entries=150, sequenceid=314, filesize=12.0 K 2024-11-28T07:22:01,915 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 8dbe93101666996632a420c7c97b42e1 in 947ms, sequenceid=314, compaction requested=false 2024-11-28T07:22:01,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:01,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:01,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-28T07:22:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-28T07:22:01,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-28T07:22:01,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4070 sec 2024-11-28T07:22:01,919 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.4120 sec 2024-11-28T07:22:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:02,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:02,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d2a78e4fb04940c0b81352c67b669119_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:02,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742066_1242 (size=14994) 2024-11-28T07:22:02,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778582044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778582045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778582146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778582148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778582349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778582351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,442 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:02,447 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d2a78e4fb04940c0b81352c67b669119_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d2a78e4fb04940c0b81352c67b669119_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:02,464 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/4d6050c446ef45bba0435d03c9ca842a, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:02,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/4d6050c446ef45bba0435d03c9ca842a is 175, key is test_row_0/A:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742067_1243 (size=39949) 2024-11-28T07:22:02,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T07:22:02,613 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-28T07:22:02,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:02,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-28T07:22:02,616 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:02,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:02,617 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:02,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:02,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778582652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:02,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778582655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:02,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:02,768 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:02,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:02,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:02,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:02,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:02,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:02,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:02,879 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/4d6050c446ef45bba0435d03c9ca842a 2024-11-28T07:22:02,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/08f7f873ed5d4e21b0000fbf19d69a14 is 50, key is test_row_0/B:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:02,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742068_1244 (size=12301) 2024-11-28T07:22:02,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:02,923 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:02,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:02,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:02,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:02,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:02,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:02,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:02,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:03,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:03,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778583157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:03,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:03,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778583159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:03,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:03,230 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:03,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:03,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/08f7f873ed5d4e21b0000fbf19d69a14 2024-11-28T07:22:03,318 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f8fe8bfa86994a6a856c95e4e0df9b9e is 50, key is test_row_0/C:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:03,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742069_1245 (size=12301) 2024-11-28T07:22:03,383 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:03,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:03,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:03,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:03,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:03,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
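The repeated pid=62 failures above follow one pattern: the master dispatches a FlushRegionCallable, the region server refuses it with "NOT flushing ... as already flushing", the callable reports IOException "Unable to complete flush", and the master logs "Remote procedure failed" and re-dispatches (07:22:03,230 / ,384 / ,541 / ,695) until the in-progress flush finishes, while pid=61 is polled with "Checking to see if procedure is done". The excerpt does not show what originally requested the flush; a table flush issued through the public Admin API, which drives exactly this master-to-regionserver procedure in recent HBase versions, would look roughly like the sketch below.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks for a flush of every region of the table. As the pid=61/pid=62 entries
      // above show, the master sends FlushRegionCallable to the region server and
      // simply re-dispatches it while the region reports "already flushing".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```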
2024-11-28T07:22:03,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:03,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f8fe8bfa86994a6a856c95e4e0df9b9e 2024-11-28T07:22:03,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/4d6050c446ef45bba0435d03c9ca842a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a 2024-11-28T07:22:03,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a, entries=200, sequenceid=335, filesize=39.0 K 2024-11-28T07:22:03,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/08f7f873ed5d4e21b0000fbf19d69a14 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14 2024-11-28T07:22:03,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14, entries=150, sequenceid=335, filesize=12.0 K 2024-11-28T07:22:03,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f8fe8bfa86994a6a856c95e4e0df9b9e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e 2024-11-28T07:22:03,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e, entries=150, sequenceid=335, filesize=12.0 K 2024-11-28T07:22:03,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 8dbe93101666996632a420c7c97b42e1 in 1730ms, sequenceid=335, compaction requested=true 2024-11-28T07:22:03,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:03,754 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
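The 512.0 K blocking limit, together with flushes completing at roughly 100 KB of data, suggests this test runs with a deliberately small memstore flush size: the blocking threshold is flush size times hbase.hregion.memstore.block.multiplier (default 4), and 128 K x 4 = 512 K. The sketch below shows how such a configuration could be set; the concrete values are an inference from the log, not taken from the test source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it holds roughly 128 KB of data (inferred value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes at multiplier * flush size = 4 * 128 KB = 512 KB,
    // matching the "Over memstore limit=512.0 K" rejections in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```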
2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:03,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:03,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:03,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:03,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:22:03,755 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,755 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=100.7 K 2024-11-28T07:22:03,756 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:03,756 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a] 2024-11-28T07:22:03,756 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:03,756 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:22:03,756 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:03,756 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/de0a4383f4ca4cab91c5a2f6e3d310a1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.7 K 2024-11-28T07:22:03,757 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 717887b0a23d4068b2e2f21fdba1ef54, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:03,757 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting de0a4383f4ca4cab91c5a2f6e3d310a1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:03,757 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting acd0b3284cd44e1aa1fa8ec45f23fb78, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778519567 2024-11-28T07:22:03,758 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15615ed985e24d77ac151121716d1aa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778519567 2024-11-28T07:22:03,758 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d6050c446ef45bba0435d03c9ca842a, 
keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:03,758 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08f7f873ed5d4e21b0000fbf19d69a14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:03,768 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:03,778 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#206 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:03,779 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/368202dd898e4b33bfb5ed8dc0c2030c is 50, key is test_row_0/B:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:03,780 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112807cb4a8664d94fdc8737ec91a6b37f15_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:03,782 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112807cb4a8664d94fdc8737ec91a6b37f15_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:03,783 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807cb4a8664d94fdc8737ec91a6b37f15_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:03,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742070_1246 (size=4469) 2024-11-28T07:22:03,794 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#205 average throughput is 0.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:03,794 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/c9d269a954694383be2336a291b2c126 is 175, key is test_row_0/A:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742071_1247 (size=13085) 2024-11-28T07:22:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742072_1248 (size=32039) 2024-11-28T07:22:03,845 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/368202dd898e4b33bfb5ed8dc0c2030c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/368202dd898e4b33bfb5ed8dc0c2030c 2024-11-28T07:22:03,852 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:03,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:03,853 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:03,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:03,854 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/c9d269a954694383be2336a291b2c126 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126 2024-11-28T07:22:03,864 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into 368202dd898e4b33bfb5ed8dc0c2030c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
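The CompactingMemStore and "Swapping pipeline suffix" messages above indicate the memstores use in-memory compaction, where flattened in-memory segments sit in a pipeline until the flush snapshots them. A minimal sketch of enabling this per column family through the public descriptor API follows; the BASIC policy choice is an assumption, not read from the test.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionFamily {
  public static ColumnFamilyDescriptor family() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        // BASIC keeps flattened in-memory segments in a pipeline before flushing,
        // which is what the "Swapping pipeline suffix" messages refer to.
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}
```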
2024-11-28T07:22:03,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:03,865 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778523754; duration=0sec 2024-11-28T07:22:03,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:03,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:22:03,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:03,866 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into c9d269a954694383be2336a291b2c126(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:03,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:03,866 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778523753; duration=0sec 2024-11-28T07:22:03,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:03,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:22:03,867 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:03,867 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:22:03,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
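For each of the A, B, and C stores the log reports "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" before starting a minor compaction of all three flushed files. The snippet below is a simplified paraphrase of that "in ratio" condition (every candidate file no larger than the compaction ratio times the sum of the others), not HBase source code; the byte sizes are approximations of the 31.2 K, 30.5 K, and 39.0 K A-family files above.

```java
import java.util.List;

public class InRatioCheck {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // Each file must be no larger than ratio * (sum of the other candidates),
      // so the policy avoids repeatedly rewriting one disproportionately large file.
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-family files selected above (~103141 bytes total).
    List<Long> sizes = List.of(31_950L, 31_230L, 39_960L);
    System.out.println(filesInRatio(sizes, 1.2)); // true with the default 1.2 ratio:
                                                  // all three are compacted together
  }
}
```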
2024-11-28T07:22:03,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/22dea93b3bc04d7fbac37ed24ece5ce0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.7 K 2024-11-28T07:22:03,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22dea93b3bc04d7fbac37ed24ece5ce0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732778518420 2024-11-28T07:22:03,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1d5f8eb8fcb4769a9d29dd875b20d40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778519567 2024-11-28T07:22:03,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8fe8bfa86994a6a856c95e4e0df9b9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:03,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128fc0c869b7573406695ae0fb47d4efe14_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778522044/Put/seqid=0 2024-11-28T07:22:03,881 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#208 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:03,881 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/4260acc7d6fb4c61b5ae7f4571f87176 is 50, key is test_row_0/C:col10/1732778522022/Put/seqid=0 2024-11-28T07:22:03,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742073_1249 (size=12454) 2024-11-28T07:22:03,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:03,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742074_1250 (size=13085) 2024-11-28T07:22:03,901 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128fc0c869b7573406695ae0fb47d4efe14_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128fc0c869b7573406695ae0fb47d4efe14_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:03,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/a2be64df0a90409abe4cad06aa3b6057, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:03,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/a2be64df0a90409abe4cad06aa3b6057 is 175, key is test_row_0/A:col10/1732778522044/Put/seqid=0 2024-11-28T07:22:03,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742075_1251 (size=31255) 2024-11-28T07:22:04,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:04,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778584184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778584186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778584288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778584291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,303 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/4260acc7d6fb4c61b5ae7f4571f87176 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4260acc7d6fb4c61b5ae7f4571f87176 2024-11-28T07:22:04,307 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=352, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/a2be64df0a90409abe4cad06aa3b6057 2024-11-28T07:22:04,310 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into 4260acc7d6fb4c61b5ae7f4571f87176(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
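The DefaultMobStoreFlusher and DefaultMobStoreCompactor entries show that family A is MOB-enabled: the flush renames its MOB file from mobdir/.tmp into mobdir/data, while the compactor aborts its MOB writer "because there are no MOB cells", since the ~50-byte cells in this test fall under the MOB threshold and stay in ordinary HFiles. A sketch of declaring such a family is below; the threshold value is hypothetical.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)          // route large values through the MOB store path
            .setMobThreshold(4 * 1024)    // hypothetical cutoff; the ~50-byte cells in
            .build())                     // this test stay in ordinary HFiles
        .build();
  }
}
```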
2024-11-28T07:22:04,310 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:04,311 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778523754; duration=0sec 2024-11-28T07:22:04,311 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:04,311 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:22:04,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a6ac97db846147b59f587a893e3d185c is 50, key is test_row_0/B:col10/1732778522044/Put/seqid=0 2024-11-28T07:22:04,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742076_1252 (size=12301) 2024-11-28T07:22:04,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778584491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778584493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:04,731 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a6ac97db846147b59f587a893e3d185c 2024-11-28T07:22:04,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/82bcbf74854e42d195922a95b1eefabf is 50, key is test_row_0/C:col10/1732778522044/Put/seqid=0 2024-11-28T07:22:04,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742077_1253 (size=12301) 2024-11-28T07:22:04,755 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/82bcbf74854e42d195922a95b1eefabf 2024-11-28T07:22:04,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/a2be64df0a90409abe4cad06aa3b6057 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057 2024-11-28T07:22:04,765 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057, entries=150, sequenceid=352, filesize=30.5 K 2024-11-28T07:22:04,768 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/a6ac97db846147b59f587a893e3d185c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c 2024-11-28T07:22:04,773 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c, entries=150, sequenceid=352, filesize=12.0 K 2024-11-28T07:22:04,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/82bcbf74854e42d195922a95b1eefabf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf 2024-11-28T07:22:04,779 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf, entries=150, sequenceid=352, filesize=12.0 K 2024-11-28T07:22:04,780 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 8dbe93101666996632a420c7c97b42e1 in 927ms, sequenceid=352, compaction requested=false 2024-11-28T07:22:04,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:04,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:04,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-28T07:22:04,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-28T07:22:04,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-28T07:22:04,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1650 sec 2024-11-28T07:22:04,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.1710 sec 2024-11-28T07:22:04,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:04,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:04,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:04,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112853f69fb3b2a841f296e4449b467c6f80_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:04,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778584820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778584821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742078_1254 (size=12454) 2024-11-28T07:22:04,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778584923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:04,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778584925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778585127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778585128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,247 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:05,252 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112853f69fb3b2a841f296e4449b467c6f80_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112853f69fb3b2a841f296e4449b467c6f80_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:05,253 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/40674ed0f2cd4c44b18734b9f466e680, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:05,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/40674ed0f2cd4c44b18734b9f466e680 is 175, key is test_row_0/A:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:05,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742079_1255 (size=31255) 2024-11-28T07:22:05,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778585430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778585433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,660 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/40674ed0f2cd4c44b18734b9f466e680 2024-11-28T07:22:05,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/fc1e7b59ee2a49749c66426cc7d70db1 is 50, key is test_row_0/B:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:05,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742080_1256 (size=12301) 2024-11-28T07:22:05,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/fc1e7b59ee2a49749c66426cc7d70db1 2024-11-28T07:22:05,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/68ec572a30ad4d8399570dd2086339d4 is 50, key is test_row_0/C:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:05,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742081_1257 (size=12301) 2024-11-28T07:22:05,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778585936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:05,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:05,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778585938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:06,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/68ec572a30ad4d8399570dd2086339d4 2024-11-28T07:22:06,136 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/40674ed0f2cd4c44b18734b9f466e680 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680 2024-11-28T07:22:06,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680, entries=150, sequenceid=376, filesize=30.5 K 2024-11-28T07:22:06,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/fc1e7b59ee2a49749c66426cc7d70db1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1 2024-11-28T07:22:06,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1, entries=150, sequenceid=376, filesize=12.0 K 2024-11-28T07:22:06,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/68ec572a30ad4d8399570dd2086339d4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4 2024-11-28T07:22:06,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4, entries=150, sequenceid=376, filesize=12.0 K 2024-11-28T07:22:06,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 8dbe93101666996632a420c7c97b42e1 in 1359ms, sequenceid=376, compaction requested=true 2024-11-28T07:22:06,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:06,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:06,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:06,157 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:06,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:06,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:22:06,158 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:06,158 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:06,158 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:22:06,158 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:06,158 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=92.3 K 2024-11-28T07:22:06,158 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:06,158 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/368202dd898e4b33bfb5ed8dc0c2030c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.8 K 2024-11-28T07:22:06,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680] 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9d269a954694383be2336a291b2c126, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 368202dd898e4b33bfb5ed8dc0c2030c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2be64df0a90409abe4cad06aa3b6057, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778522038 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a6ac97db846147b59f587a893e3d185c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778522038 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40674ed0f2cd4c44b18734b9f466e680, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:06,159 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fc1e7b59ee2a49749c66426cc7d70db1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:06,167 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:06,168 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#214 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:06,168 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c023d7079eb143ba9866d15041be5027 is 50, key is test_row_0/B:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:06,169 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128acef681093504865b0775e3931dac15c_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:06,171 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128acef681093504865b0775e3931dac15c_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:06,171 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128acef681093504865b0775e3931dac15c_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742083_1259 (size=4469) 2024-11-28T07:22:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742082_1258 (size=13187) 2024-11-28T07:22:06,200 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#215 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:06,201 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e89e261afb7749fdbac842a535c1381f is 175, key is test_row_0/A:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:06,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742084_1260 (size=32141) 2024-11-28T07:22:06,211 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/e89e261afb7749fdbac842a535c1381f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f 2024-11-28T07:22:06,217 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into e89e261afb7749fdbac842a535c1381f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:06,217 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:06,217 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778526156; duration=0sec 2024-11-28T07:22:06,217 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:06,217 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:22:06,217 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:06,220 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:06,220 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:22:06,220 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:06,220 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4260acc7d6fb4c61b5ae7f4571f87176, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.8 K 2024-11-28T07:22:06,221 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4260acc7d6fb4c61b5ae7f4571f87176, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732778521702 2024-11-28T07:22:06,221 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82bcbf74854e42d195922a95b1eefabf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778522038 2024-11-28T07:22:06,222 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68ec572a30ad4d8399570dd2086339d4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:06,228 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#C#compaction#216 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:06,229 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/ec13707412054f7bbbd497143a1c5dcc is 50, key is test_row_0/C:col10/1732778524797/Put/seqid=0 2024-11-28T07:22:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742085_1261 (size=13187) 2024-11-28T07:22:06,240 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/ec13707412054f7bbbd497143a1c5dcc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ec13707412054f7bbbd497143a1c5dcc 2024-11-28T07:22:06,245 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into ec13707412054f7bbbd497143a1c5dcc(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:06,245 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:06,245 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778526157; duration=0sec 2024-11-28T07:22:06,245 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:06,246 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:22:06,606 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/c023d7079eb143ba9866d15041be5027 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c023d7079eb143ba9866d15041be5027 2024-11-28T07:22:06,612 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into c023d7079eb143ba9866d15041be5027(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:06,612 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:06,612 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778526157; duration=0sec 2024-11-28T07:22:06,612 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:06,612 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:22:06,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T07:22:06,721 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-28T07:22:06,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:06,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-28T07:22:06,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=63 2024-11-28T07:22:06,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:06,725 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:06,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:06,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T07:22:06,877 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:06,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-28T07:22:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:06,878 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:06,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:06,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112816b1cfb8be514ca693a8059a1b2498fe_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778524819/Put/seqid=0 
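Annotation: the entries above record a client-requested table flush being carried out by the master as a FlushTableProcedure (pid=63) with a FlushRegionProcedure subprocedure (pid=64) dispatched to the region server, while the client polls "Checking to see if procedure is done". For reference, the sketch below is a minimal, hypothetical example of how such a flush can be requested through the public HBase 2.x Admin API (table name taken from this log, connection setup assumed); it is not taken from the test itself.

    // Minimal sketch: ask the master to flush all regions of a table.
    // Assumes an HBase 2.x client on the classpath and a reachable cluster.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Mirrors the "Operation: FLUSH, Table Name: default:TestAcidGuarantees" entries
          // in this log: the master stores a FlushTableProcedure and the call returns
          // once the procedure has completed.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
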
2024-11-28T07:22:06,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742086_1262 (size=12454) 2024-11-28T07:22:06,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:06,896 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112816b1cfb8be514ca693a8059a1b2498fe_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112816b1cfb8be514ca693a8059a1b2498fe_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:06,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/92d905884be04ebf9ac8df72be8e3c5d, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:06,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/92d905884be04ebf9ac8df72be8e3c5d is 175, key is test_row_0/A:col10/1732778524819/Put/seqid=0 2024-11-28T07:22:06,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742087_1263 (size=31255) 2024-11-28T07:22:06,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:06,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:06,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778586966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:06,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:06,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778586967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T07:22:07,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778587069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778587069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778587271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778587272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,305 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=394, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/92d905884be04ebf9ac8df72be8e3c5d 2024-11-28T07:22:07,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/cb16fa79a3534bce86f19d8b586f869a is 50, key is test_row_0/B:col10/1732778524819/Put/seqid=0 2024-11-28T07:22:07,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742088_1264 (size=12301) 2024-11-28T07:22:07,319 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/cb16fa79a3534bce86f19d8b586f869a 2024-11-28T07:22:07,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T07:22:07,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/9f248162add447b3a74dd8a1b0684f0f is 50, key is test_row_0/C:col10/1732778524819/Put/seqid=0 2024-11-28T07:22:07,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742089_1265 (size=12301) 2024-11-28T07:22:07,335 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=394 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/9f248162add447b3a74dd8a1b0684f0f 2024-11-28T07:22:07,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/92d905884be04ebf9ac8df72be8e3c5d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d 2024-11-28T07:22:07,348 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d, entries=150, sequenceid=394, filesize=30.5 K 2024-11-28T07:22:07,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/cb16fa79a3534bce86f19d8b586f869a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a 2024-11-28T07:22:07,353 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a, entries=150, sequenceid=394, filesize=12.0 K 2024-11-28T07:22:07,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/9f248162add447b3a74dd8a1b0684f0f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f 2024-11-28T07:22:07,359 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f, entries=150, sequenceid=394, filesize=12.0 K 2024-11-28T07:22:07,360 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 8dbe93101666996632a420c7c97b42e1 in 482ms, sequenceid=394, compaction requested=false 2024-11-28T07:22:07,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 
8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:07,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:07,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-28T07:22:07,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-28T07:22:07,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-28T07:22:07,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 635 msec 2024-11-28T07:22:07,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 640 msec 2024-11-28T07:22:07,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:07,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:22:07,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:07,577 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:07,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112885f6606aae8a437aa6567e25938362c3_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742090_1266 (size=12454) 2024-11-28T07:22:07,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778587593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778587595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778587696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778587698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T07:22:07,828 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-28T07:22:07,830 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:07,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-11-28T07:22:07,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:07,831 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:07,832 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:07,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:07,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52974 deadline: 1732778587889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,892 DEBUG [Thread-740 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:07,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778587901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778587901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52986 deadline: 1732778587908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,909 DEBUG [Thread-734 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18235 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:07,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:07,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52996 deadline: 1732778587920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:07,922 DEBUG [Thread-742 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18248 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:07,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:07,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:07,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:07,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:07,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:07,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:07,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:07,990 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:07,995 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112885f6606aae8a437aa6567e25938362c3_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885f6606aae8a437aa6567e25938362c3_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:07,996 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5af9076c0de94dbbbd30fca91c3e9363, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:07,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5af9076c0de94dbbbd30fca91c3e9363 is 175, key is test_row_0/A:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:08,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742091_1267 (size=31255) 2024-11-28T07:22:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:08,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778588203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:08,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778588205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:08,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:08,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:08,401 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5af9076c0de94dbbbd30fca91c3e9363 2024-11-28T07:22:08,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/348d525fb1cb45119d7982f91970c9f8 is 50, key is test_row_0/B:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:08,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742092_1268 (size=12301) 2024-11-28T07:22:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:08,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:08,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,444 DEBUG [Thread-749 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c7940d9 to 127.0.0.1:56318 2024-11-28T07:22:08,444 DEBUG [Thread-749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:08,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,446 DEBUG [Thread-747 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3b727d6e to 127.0.0.1:56318 2024-11-28T07:22:08,446 DEBUG [Thread-747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:08,446 DEBUG [Thread-745 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:56318 2024-11-28T07:22:08,446 DEBUG [Thread-745 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:08,448 DEBUG [Thread-751 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c38ee58 to 127.0.0.1:56318 2024-11-28T07:22:08,448 DEBUG [Thread-751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:08,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1732778588707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:08,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52958 deadline: 1732778588708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:08,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:08,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/348d525fb1cb45119d7982f91970c9f8 2024-11-28T07:22:08,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/7cb550699e0f420fa84caca0fb26d17c is 50, key is test_row_0/C:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:08,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742093_1269 (size=12301) 2024-11-28T07:22:08,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:08,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:08,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:08,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:08,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:08,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:08,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:09,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:09,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:09,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:09,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:09,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:09,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:09,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:09,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:09,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:09,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:09,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:09,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/7cb550699e0f420fa84caca0fb26d17c 2024-11-28T07:22:09,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/5af9076c0de94dbbbd30fca91c3e9363 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363 2024-11-28T07:22:09,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363, entries=150, sequenceid=416, filesize=30.5 K 2024-11-28T07:22:09,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/348d525fb1cb45119d7982f91970c9f8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8 2024-11-28T07:22:09,239 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8, entries=150, sequenceid=416, filesize=12.0 K 2024-11-28T07:22:09,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/7cb550699e0f420fa84caca0fb26d17c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c 2024-11-28T07:22:09,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c, entries=150, sequenceid=416, filesize=12.0 K 2024-11-28T07:22:09,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 8dbe93101666996632a420c7c97b42e1 in 1668ms, sequenceid=416, compaction requested=true 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8dbe93101666996632a420c7c97b42e1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:09,245 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:09,245 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:09,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:09,246 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:09,246 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/B is initiating minor compaction (all files) 2024-11-28T07:22:09,246 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/B in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,246 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:09,246 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c023d7079eb143ba9866d15041be5027, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.9 K 2024-11-28T07:22:09,246 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/A is initiating minor compaction (all files) 2024-11-28T07:22:09,246 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/A in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:09,247 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=92.4 K 2024-11-28T07:22:09,247 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c023d7079eb143ba9866d15041be5027, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363] 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cb16fa79a3534bce86f19d8b586f869a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778524811 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e89e261afb7749fdbac842a535c1381f, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 348d525fb1cb45119d7982f91970c9f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732778526965 2024-11-28T07:22:09,247 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92d905884be04ebf9ac8df72be8e3c5d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778524811 2024-11-28T07:22:09,248 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5af9076c0de94dbbbd30fca91c3e9363, keycount=150, bloomtype=ROW, size=30.5 
K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732778526965 2024-11-28T07:22:09,255 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#B#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:09,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/e373daf43a794a0bb51c6c09810cb647 is 50, key is test_row_0/B:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:09,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742094_1270 (size=13289) 2024-11-28T07:22:09,264 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:09,266 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128d26b1c352fcc47ef8c3d9870d0e72a5f_8dbe93101666996632a420c7c97b42e1 store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:09,289 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128d26b1c352fcc47ef8c3d9870d0e72a5f_8dbe93101666996632a420c7c97b42e1, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:09,289 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d26b1c352fcc47ef8c3d9870d0e72a5f_8dbe93101666996632a420c7c97b42e1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:09,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742095_1271 (size=4469) 2024-11-28T07:22:09,360 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:09,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-28T07:22:09,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:09,361 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:09,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:09,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112827f4cfec8f81489c830b191cd8a9145b_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778527588/Put/seqid=0 2024-11-28T07:22:09,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742096_1272 (size=12454) 2024-11-28T07:22:09,664 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/e373daf43a794a0bb51c6c09810cb647 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e373daf43a794a0bb51c6c09810cb647 2024-11-28T07:22:09,668 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/B of 8dbe93101666996632a420c7c97b42e1 into e373daf43a794a0bb51c6c09810cb647(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:09,668 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:09,668 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/B, priority=13, startTime=1732778529245; duration=0sec 2024-11-28T07:22:09,669 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:09,669 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:B 2024-11-28T07:22:09,669 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:09,669 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:09,669 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 8dbe93101666996632a420c7c97b42e1/C is initiating minor compaction (all files) 2024-11-28T07:22:09,669 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8dbe93101666996632a420c7c97b42e1/C in TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:09,670 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ec13707412054f7bbbd497143a1c5dcc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp, totalSize=36.9 K 2024-11-28T07:22:09,670 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ec13707412054f7bbbd497143a1c5dcc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732778524183 2024-11-28T07:22:09,670 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f248162add447b3a74dd8a1b0684f0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778524811 2024-11-28T07:22:09,670 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cb550699e0f420fa84caca0fb26d17c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732778526965 2024-11-28T07:22:09,677 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8dbe93101666996632a420c7c97b42e1#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:09,677 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b69a5128e0d1447abe0769c075bf2147 is 50, key is test_row_0/C:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:09,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742097_1273 (size=13289) 2024-11-28T07:22:09,685 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/b69a5128e0d1447abe0769c075bf2147 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b69a5128e0d1447abe0769c075bf2147 2024-11-28T07:22:09,689 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/C of 8dbe93101666996632a420c7c97b42e1 into b69a5128e0d1447abe0769c075bf2147(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:09,689 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:09,689 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/C, priority=13, startTime=1732778529245; duration=0sec 2024-11-28T07:22:09,689 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:09,689 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:C 2024-11-28T07:22:09,695 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8dbe93101666996632a420c7c97b42e1#A#compaction#224 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:09,695 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/451a5a3b7d874a16b78157441a0259be is 175, key is test_row_0/A:col10/1732778527576/Put/seqid=0 2024-11-28T07:22:09,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742098_1274 (size=32243) 2024-11-28T07:22:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:09,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. as already flushing 2024-11-28T07:22:09,711 DEBUG [Thread-738 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x454f1431 to 127.0.0.1:56318 2024-11-28T07:22:09,711 DEBUG [Thread-738 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:09,718 DEBUG [Thread-736 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b4bd1ba to 127.0.0.1:56318 2024-11-28T07:22:09,718 DEBUG [Thread-736 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:09,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:09,780 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112827f4cfec8f81489c830b191cd8a9145b_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112827f4cfec8f81489c830b191cd8a9145b_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:09,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/7d742484d6a04137951f559be1f40f2c, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:09,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/7d742484d6a04137951f559be1f40f2c is 175, key is test_row_0/A:col10/1732778527588/Put/seqid=0 2024-11-28T07:22:09,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742099_1275 (size=31255) 2024-11-28T07:22:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=65 2024-11-28T07:22:10,104 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/451a5a3b7d874a16b78157441a0259be as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/451a5a3b7d874a16b78157441a0259be 2024-11-28T07:22:10,108 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8dbe93101666996632a420c7c97b42e1/A of 8dbe93101666996632a420c7c97b42e1 into 451a5a3b7d874a16b78157441a0259be(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:10,108 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:10,108 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1., storeName=8dbe93101666996632a420c7c97b42e1/A, priority=13, startTime=1732778529245; duration=0sec 2024-11-28T07:22:10,109 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:10,109 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8dbe93101666996632a420c7c97b42e1:A 2024-11-28T07:22:10,186 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=430, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/7d742484d6a04137951f559be1f40f2c 2024-11-28T07:22:10,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/bf5b2fdbb6ea4c23a6c08efa196ad658 is 50, key is test_row_0/B:col10/1732778527588/Put/seqid=0 2024-11-28T07:22:10,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742100_1276 (size=12301) 2024-11-28T07:22:10,599 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/bf5b2fdbb6ea4c23a6c08efa196ad658 2024-11-28T07:22:10,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/ffd6e647f4654659b3c5912c0dc8a81c is 50, key is test_row_0/C:col10/1732778527588/Put/seqid=0 2024-11-28T07:22:10,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742101_1277 (size=12301) 2024-11-28T07:22:11,011 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/ffd6e647f4654659b3c5912c0dc8a81c 2024-11-28T07:22:11,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/7d742484d6a04137951f559be1f40f2c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/7d742484d6a04137951f559be1f40f2c 2024-11-28T07:22:11,019 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/7d742484d6a04137951f559be1f40f2c, entries=150, sequenceid=430, filesize=30.5 K 2024-11-28T07:22:11,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/bf5b2fdbb6ea4c23a6c08efa196ad658 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/bf5b2fdbb6ea4c23a6c08efa196ad658 2024-11-28T07:22:11,024 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/bf5b2fdbb6ea4c23a6c08efa196ad658, entries=150, sequenceid=430, filesize=12.0 K 2024-11-28T07:22:11,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/ffd6e647f4654659b3c5912c0dc8a81c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ffd6e647f4654659b3c5912c0dc8a81c 2024-11-28T07:22:11,028 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ffd6e647f4654659b3c5912c0dc8a81c, entries=150, sequenceid=430, filesize=12.0 K 2024-11-28T07:22:11,029 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=13.42 KB/13740 for 8dbe93101666996632a420c7c97b42e1 in 1668ms, sequenceid=430, compaction requested=false 2024-11-28T07:22:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:11,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-11-28T07:22:11,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-11-28T07:22:11,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-28T07:22:11,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1980 sec 2024-11-28T07:22:11,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 3.2020 sec 2024-11-28T07:22:11,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-28T07:22:11,936 INFO [Thread-744 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-28T07:22:12,846 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T07:22:17,933 DEBUG [Thread-742 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:56318 2024-11-28T07:22:17,933 DEBUG [Thread-742 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:17,935 DEBUG [Thread-740 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x505d5ccd to 127.0.0.1:56318 2024-11-28T07:22:17,935 DEBUG [Thread-740 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:17,985 DEBUG [Thread-734 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79982672 to 127.0.0.1:56318 2024-11-28T07:22:17,985 DEBUG [Thread-734 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 166 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 146 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6166 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6078 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2643 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7929 rows 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2633 2024-11-28T07:22:17,985 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7899 rows 2024-11-28T07:22:17,985 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T07:22:17,985 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x118b007e to 127.0.0.1:56318 2024-11-28T07:22:17,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:17,988 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T07:22:17,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T07:22:17,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:17,993 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778537993"}]},"ts":"1732778537993"} 2024-11-28T07:22:17,994 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T07:22:17,996 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T07:22:17,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:22:17,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, UNASSIGN}] 2024-11-28T07:22:17,998 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, UNASSIGN 2024-11-28T07:22:17,999 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:17,999 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41703 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=592d8b721726,33143,1732778474488, table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-28T07:22:18,000 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:22:18,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; CloseRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:18,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:18,152 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(124): Close 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1681): Closing 8dbe93101666996632a420c7c97b42e1, disabling compactions & flushes 2024-11-28T07:22:18,152 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. after waiting 0 ms 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 
2024-11-28T07:22:18,152 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(2837): Flushing 8dbe93101666996632a420c7c97b42e1 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=A 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=B 2024-11-28T07:22:18,152 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:18,153 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8dbe93101666996632a420c7c97b42e1, store=C 2024-11-28T07:22:18,153 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:18,161 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286fb845f4ca9b4990ac7099a1c43b8de7_8dbe93101666996632a420c7c97b42e1 is 50, key is test_row_0/A:col10/1732778537984/Put/seqid=0 2024-11-28T07:22:18,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742102_1278 (size=12454) 2024-11-28T07:22:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:18,569 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:18,574 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286fb845f4ca9b4990ac7099a1c43b8de7_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286fb845f4ca9b4990ac7099a1c43b8de7_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:18,575 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/603ea4f811214207b3c3e21688a47c41, store: [table=TestAcidGuarantees family=A region=8dbe93101666996632a420c7c97b42e1] 2024-11-28T07:22:18,575 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/603ea4f811214207b3c3e21688a47c41 is 175, key is test_row_0/A:col10/1732778537984/Put/seqid=0 2024-11-28T07:22:18,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742103_1279 (size=31255) 2024-11-28T07:22:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:18,615 DEBUG [master/592d8b721726:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-28T07:22:18,620 DEBUG [master/592d8b721726:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 29128ed80b74de0f148960cd93ceedac changed from -1.0 to 0.0, refreshing cache 2024-11-28T07:22:18,980 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=441, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/603ea4f811214207b3c3e21688a47c41 2024-11-28T07:22:18,987 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/66cfa6918a714978944e10ced9ef25d7 is 50, key is test_row_0/B:col10/1732778537984/Put/seqid=0 2024-11-28T07:22:18,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742104_1280 (size=12301) 2024-11-28T07:22:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:19,391 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/66cfa6918a714978944e10ced9ef25d7 2024-11-28T07:22:19,399 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f16331bc326248e0b67e6ff93970afc0 is 50, key is test_row_0/C:col10/1732778537984/Put/seqid=0 2024-11-28T07:22:19,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35055 is added to blk_1073742105_1281 (size=12301) 2024-11-28T07:22:19,804 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f16331bc326248e0b67e6ff93970afc0 2024-11-28T07:22:19,809 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/A/603ea4f811214207b3c3e21688a47c41 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/603ea4f811214207b3c3e21688a47c41 2024-11-28T07:22:19,813 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/603ea4f811214207b3c3e21688a47c41, entries=150, sequenceid=441, filesize=30.5 K 2024-11-28T07:22:19,813 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/B/66cfa6918a714978944e10ced9ef25d7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/66cfa6918a714978944e10ced9ef25d7 2024-11-28T07:22:19,816 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/66cfa6918a714978944e10ced9ef25d7, entries=150, sequenceid=441, filesize=12.0 K 2024-11-28T07:22:19,817 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/.tmp/C/f16331bc326248e0b67e6ff93970afc0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f16331bc326248e0b67e6ff93970afc0 2024-11-28T07:22:19,820 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f16331bc326248e0b67e6ff93970afc0, entries=150, sequenceid=441, filesize=12.0 K 2024-11-28T07:22:19,821 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 8dbe93101666996632a420c7c97b42e1 in 
1669ms, sequenceid=441, compaction requested=true 2024-11-28T07:22:19,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363] to archive 2024-11-28T07:22:19,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T07:22:19,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0bbface044214dedaf7fa43dc80d5720 2024-11-28T07:22:19,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/d90ee3de98e547468ae7860ea0103ac0 2024-11-28T07:22:19,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0aef7bac3f71485594d3446a40a18ff8 2024-11-28T07:22:19,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e4bd8e2563e34adb811e70bda22a9471 2024-11-28T07:22:19,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/fd10f151b2f849479d9e2dfc26a43a97 2024-11-28T07:22:19,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/093112858f164f849d7c76a8866fe868 2024-11-28T07:22:19,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffadf363beb041fd8227576cfdf033ac 2024-11-28T07:22:19,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/bfdf3666f4ab4e8e86dd17e591495f9b 2024-11-28T07:22:19,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0547e7672ee049b4877ca8cf7f3f6979 2024-11-28T07:22:19,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/88340ba3c4fa415e9852157749acdd5c 2024-11-28T07:22:19,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/08e5ac5c951b46bc8dc645f4d09e540c 2024-11-28T07:22:19,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/0312c53223264132974991b2fb55ac6f 2024-11-28T07:22:19,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ae495b8a9beb40d1a216fd0b9eea9660 2024-11-28T07:22:19,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ffa322011e0842b8997c9e8779ac67c8 2024-11-28T07:22:19,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/aea682038f20403485b1534bcddaa821 2024-11-28T07:22:19,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/ba0bede42de74697b0bf6eda5c6765cf 2024-11-28T07:22:19,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/f5626bbd2ba14622b187bae81efffb56 2024-11-28T07:22:19,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/dc6c75b2f4724f6799271566bc95af58 2024-11-28T07:22:19,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5c3ae25bd8174b099568950d3c15f602 2024-11-28T07:22:19,844 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/717887b0a23d4068b2e2f21fdba1ef54 2024-11-28T07:22:19,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/37982bfa7c23484baab4afc07a2c861d 2024-11-28T07:22:19,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/acd0b3284cd44e1aa1fa8ec45f23fb78 2024-11-28T07:22:19,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/4d6050c446ef45bba0435d03c9ca842a 2024-11-28T07:22:19,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/c9d269a954694383be2336a291b2c126 2024-11-28T07:22:19,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/a2be64df0a90409abe4cad06aa3b6057 2024-11-28T07:22:19,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/e89e261afb7749fdbac842a535c1381f 2024-11-28T07:22:19,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/40674ed0f2cd4c44b18734b9f466e680 2024-11-28T07:22:19,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/92d905884be04ebf9ac8df72be8e3c5d 2024-11-28T07:22:19,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/5af9076c0de94dbbbd30fca91c3e9363 2024-11-28T07:22:19,855 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c523fb93c91a4eebbaff3aaed5edb651, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/290faf943bf841fcb5d0d3cffd79c97f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cd971456d5dc4a2eaeec7177982cf05a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/69af8e2ebfc7413998c75260942c189e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/16c5f124ab824cf0b1f2a4f07fb8b881, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/de0a4383f4ca4cab91c5a2f6e3d310a1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/368202dd898e4b33bfb5ed8dc0c2030c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c023d7079eb143ba9866d15041be5027, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8] to archive 2024-11-28T07:22:19,856 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:22:19,857 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/57f06e404bdc432db64bcdf96c7b25d6 2024-11-28T07:22:19,858 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e344ccd0af804bed811ba9e18abbebbc 2024-11-28T07:22:19,859 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/72d80f11c8e14fdcb045144bcf111510 2024-11-28T07:22:19,860 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c523fb93c91a4eebbaff3aaed5edb651 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c523fb93c91a4eebbaff3aaed5edb651 2024-11-28T07:22:19,861 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/73ffbd75c91f48fabbab1fe21942b96b 2024-11-28T07:22:19,862 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/b1dff85571da47409448a5fc4f7bad3b 2024-11-28T07:22:19,863 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/681a48eeb15845eeb813fc4b5d382fe2 2024-11-28T07:22:19,864 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/290faf943bf841fcb5d0d3cffd79c97f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/290faf943bf841fcb5d0d3cffd79c97f 2024-11-28T07:22:19,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/7d93c5f22bff4b8dad0297dc92e59617 2024-11-28T07:22:19,865 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/546aa3f3eb4942f48e997dd62391dcc8 2024-11-28T07:22:19,866 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cd971456d5dc4a2eaeec7177982cf05a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cd971456d5dc4a2eaeec7177982cf05a 2024-11-28T07:22:19,868 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738 2024-11-28T07:22:19,869 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c831909b64874d4f892ccbbc3e3011ba 2024-11-28T07:22:19,870 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/69af8e2ebfc7413998c75260942c189e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/69af8e2ebfc7413998c75260942c189e 2024-11-28T07:22:19,871 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/02acc0f83cba465ba39fe5a0acba67e5 2024-11-28T07:22:19,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a5eea05d18e44245a50c99d54bdfd89b 2024-11-28T07:22:19,872 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/16c5f124ab824cf0b1f2a4f07fb8b881 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/16c5f124ab824cf0b1f2a4f07fb8b881 2024-11-28T07:22:19,873 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a0af38e4d0724dae95db1bdf8d2f4677 2024-11-28T07:22:19,874 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/1fcc6b8e31a1417386397d69967b3b8e 2024-11-28T07:22:19,875 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/de0a4383f4ca4cab91c5a2f6e3d310a1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/de0a4383f4ca4cab91c5a2f6e3d310a1 2024-11-28T07:22:19,877 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/9dbb1fee18c3476abd05bbf4fff0fef5 2024-11-28T07:22:19,878 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/15615ed985e24d77ac151121716d1aa8 2024-11-28T07:22:19,879 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/368202dd898e4b33bfb5ed8dc0c2030c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/368202dd898e4b33bfb5ed8dc0c2030c 2024-11-28T07:22:19,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/08f7f873ed5d4e21b0000fbf19d69a14 2024-11-28T07:22:19,882 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/a6ac97db846147b59f587a893e3d185c 2024-11-28T07:22:19,883 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c023d7079eb143ba9866d15041be5027 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c023d7079eb143ba9866d15041be5027 2024-11-28T07:22:19,885 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/fc1e7b59ee2a49749c66426cc7d70db1 2024-11-28T07:22:19,886 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/cb16fa79a3534bce86f19d8b586f869a 2024-11-28T07:22:19,887 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/348d525fb1cb45119d7982f91970c9f8 2024-11-28T07:22:19,889 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4cfd24ff791b4009b19bceb19ea49b76, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/0583332f504d46e68968751b782e43da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/62e361fc888f46b4865bbc2e2ba9dae2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f31880a9d7a94ce7bfc25a89190fd974, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/89ece376586d48b78e45878dade5f32e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/22dea93b3bc04d7fbac37ed24ece5ce0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4260acc7d6fb4c61b5ae7f4571f87176, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ec13707412054f7bbbd497143a1c5dcc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c] to archive 2024-11-28T07:22:19,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:22:19,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/d0205a93515449c2820de19e5da912da 2024-11-28T07:22:19,893 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/aa1dd51c5a7642ea863b571913ed6b69 2024-11-28T07:22:19,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/680537bd8a8441e0a0cea606952ec3b5 2024-11-28T07:22:19,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4cfd24ff791b4009b19bceb19ea49b76 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4cfd24ff791b4009b19bceb19ea49b76 2024-11-28T07:22:19,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/86b24263dd1548ae8306c0b29dee5d07 2024-11-28T07:22:19,898 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b8b1446aa91d4b4ab11e4f787fbb08cf 2024-11-28T07:22:19,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/391ebfd10278421f956567579ab8490c 2024-11-28T07:22:19,900 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/0583332f504d46e68968751b782e43da to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/0583332f504d46e68968751b782e43da 2024-11-28T07:22:19,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/eb7662a8aed047a7887689c779e8c7bd 2024-11-28T07:22:19,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/61fab40e9f50477881d5b64ae80e1614 2024-11-28T07:22:19,903 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/62e361fc888f46b4865bbc2e2ba9dae2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/62e361fc888f46b4865bbc2e2ba9dae2 2024-11-28T07:22:19,904 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/194d2669f0da4af48c06fe04ef84de8d 2024-11-28T07:22:19,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3d58c198624c4a1c9cd5416d90fb4e6d 2024-11-28T07:22:19,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f31880a9d7a94ce7bfc25a89190fd974 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f31880a9d7a94ce7bfc25a89190fd974 2024-11-28T07:22:19,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/feef6bb144a0496ba183d0091fe63f57 2024-11-28T07:22:19,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3b09476166bd46aea1c476c30cd504c7 2024-11-28T07:22:19,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/89ece376586d48b78e45878dade5f32e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/89ece376586d48b78e45878dade5f32e 2024-11-28T07:22:19,910 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/3e0a28f000ec4d5aabb044a92cf719d3 2024-11-28T07:22:19,911 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b9181907be384567bfdbda2c1016a6a3 2024-11-28T07:22:19,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/22dea93b3bc04d7fbac37ed24ece5ce0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/22dea93b3bc04d7fbac37ed24ece5ce0 2024-11-28T07:22:19,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/da583e07fc854b9895de4b4e1e97fc7d 2024-11-28T07:22:19,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/c1d5f8eb8fcb4769a9d29dd875b20d40 2024-11-28T07:22:19,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4260acc7d6fb4c61b5ae7f4571f87176 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/4260acc7d6fb4c61b5ae7f4571f87176 2024-11-28T07:22:19,916 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f8fe8bfa86994a6a856c95e4e0df9b9e 2024-11-28T07:22:19,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/82bcbf74854e42d195922a95b1eefabf 2024-11-28T07:22:19,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ec13707412054f7bbbd497143a1c5dcc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ec13707412054f7bbbd497143a1c5dcc 2024-11-28T07:22:19,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/68ec572a30ad4d8399570dd2086339d4 2024-11-28T07:22:19,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/9f248162add447b3a74dd8a1b0684f0f 2024-11-28T07:22:19,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/7cb550699e0f420fa84caca0fb26d17c 2024-11-28T07:22:19,925 DEBUG 
[RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits/444.seqid, newMaxSeqId=444, maxSeqId=4 2024-11-28T07:22:19,926 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1. 2024-11-28T07:22:19,926 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1635): Region close journal for 8dbe93101666996632a420c7c97b42e1: 2024-11-28T07:22:19,927 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(170): Closed 8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:19,927 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=8dbe93101666996632a420c7c97b42e1, regionState=CLOSED 2024-11-28T07:22:19,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-28T07:22:19,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseRegionProcedure 8dbe93101666996632a420c7c97b42e1, server=592d8b721726,33143,1732778474488 in 1.9280 sec 2024-11-28T07:22:19,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-28T07:22:19,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8dbe93101666996632a420c7c97b42e1, UNASSIGN in 1.9310 sec 2024-11-28T07:22:19,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-28T07:22:19,932 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9340 sec 2024-11-28T07:22:19,933 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778539932"}]},"ts":"1732778539932"} 2024-11-28T07:22:19,933 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:22:19,936 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:22:19,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9480 sec 2024-11-28T07:22:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T07:22:20,097 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-28T07:22:20,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:22:20,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,099 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T07:22:20,099 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=71, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,101 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,103 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits] 2024-11-28T07:22:20,106 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/451a5a3b7d874a16b78157441a0259be to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/451a5a3b7d874a16b78157441a0259be 2024-11-28T07:22:20,107 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/603ea4f811214207b3c3e21688a47c41 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/603ea4f811214207b3c3e21688a47c41 2024-11-28T07:22:20,109 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/7d742484d6a04137951f559be1f40f2c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/A/7d742484d6a04137951f559be1f40f2c 2024-11-28T07:22:20,111 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/66cfa6918a714978944e10ced9ef25d7 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/66cfa6918a714978944e10ced9ef25d7 2024-11-28T07:22:20,112 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/bf5b2fdbb6ea4c23a6c08efa196ad658 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/bf5b2fdbb6ea4c23a6c08efa196ad658 2024-11-28T07:22:20,113 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e373daf43a794a0bb51c6c09810cb647 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/e373daf43a794a0bb51c6c09810cb647 2024-11-28T07:22:20,115 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b69a5128e0d1447abe0769c075bf2147 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/b69a5128e0d1447abe0769c075bf2147 2024-11-28T07:22:20,117 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f16331bc326248e0b67e6ff93970afc0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/f16331bc326248e0b67e6ff93970afc0 2024-11-28T07:22:20,118 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ffd6e647f4654659b3c5912c0dc8a81c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/C/ffd6e647f4654659b3c5912c0dc8a81c 2024-11-28T07:22:20,120 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits/444.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/recovered.edits/444.seqid 2024-11-28T07:22:20,121 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,121 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:22:20,121 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:22:20,122 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T07:22:20,126 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112803dd37a897004a53946931aa2f6f3315_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112803dd37a897004a53946931aa2f6f3315_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,128 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807d9d81ebaef49bca0fd960d2b1f673e_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807d9d81ebaef49bca0fd960d2b1f673e_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,129 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112816b1cfb8be514ca693a8059a1b2498fe_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112816b1cfb8be514ca693a8059a1b2498fe_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,130 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112827f4cfec8f81489c830b191cd8a9145b_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112827f4cfec8f81489c830b191cd8a9145b_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,131 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112846e3abdbb6924be19d81d170d9121383_8dbe93101666996632a420c7c97b42e1 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112846e3abdbb6924be19d81d170d9121383_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,132 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112853f69fb3b2a841f296e4449b467c6f80_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112853f69fb3b2a841f296e4449b467c6f80_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,134 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e5af19f1cf144bdbc2c390e538bf83a_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e5af19f1cf144bdbc2c390e538bf83a_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,135 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ca4a582550449d2ad8f7778ef4cbc9c_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ca4a582550449d2ad8f7778ef4cbc9c_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,136 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286fb845f4ca9b4990ac7099a1c43b8de7_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286fb845f4ca9b4990ac7099a1c43b8de7_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,137 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128782218777b714540b1f5202af8d7812e_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128782218777b714540b1f5202af8d7812e_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,138 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885f6606aae8a437aa6567e25938362c3_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885f6606aae8a437aa6567e25938362c3_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,140 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112894762f6e382b434f88069b618bcc6ab7_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112894762f6e382b434f88069b618bcc6ab7_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,141 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289d8431e930f340b692313b72dee40dcd_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289d8431e930f340b692313b72dee40dcd_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,142 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a5870a44ba084451b269c5779c034eb4_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a5870a44ba084451b269c5779c034eb4_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,144 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5f8310c32934cb08449a58e4e5e49da_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5f8310c32934cb08449a58e4e5e49da_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,145 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128befb5c1eedba453e95b0554a03057056_8dbe93101666996632a420c7c97b42e1 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128befb5c1eedba453e95b0554a03057056_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,146 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c3af4cb6b4ec4dc39dfdadc2744adaf0_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c3af4cb6b4ec4dc39dfdadc2744adaf0_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,147 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d2a78e4fb04940c0b81352c67b669119_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d2a78e4fb04940c0b81352c67b669119_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,148 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e66d6630b51b4c7497db713af1b7d27c_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e66d6630b51b4c7497db713af1b7d27c_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,150 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e7bc8438bde64eb298775d14f8027a58_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e7bc8438bde64eb298775d14f8027a58_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,151 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f3471022df8e4b9393272ea951030202_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f3471022df8e4b9393272ea951030202_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,153 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f36b14e9d0424905a3643ef60d77e723_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f36b14e9d0424905a3643ef60d77e723_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,154 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128fc0c869b7573406695ae0fb47d4efe14_8dbe93101666996632a420c7c97b42e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128fc0c869b7573406695ae0fb47d4efe14_8dbe93101666996632a420c7c97b42e1 2024-11-28T07:22:20,155 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:22:20,158 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=71, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,160 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:22:20,163 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:22:20,164 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=71, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,164 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T07:22:20,164 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778540164"}]},"ts":"9223372036854775807"} 2024-11-28T07:22:20,168 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:22:20,168 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8dbe93101666996632a420c7c97b42e1, NAME => 'TestAcidGuarantees,,1732778505330.8dbe93101666996632a420c7c97b42e1.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:22:20,168 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
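The long run of HFileArchiver records above all follows one pattern: nothing is deleted outright; each store file (and each mob file) is moved from its place under data/ or mobdir/data/ to the same relative path under archive/. The sketch below illustrates that move-to-parallel-archive-path idea with the plain Hadoop FileSystem API only; it is not HBase's HFileArchiver (which adds retries, collision handling and bulk moves), and the class, method name and example paths are illustrative, modelled on the values in the log.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  /**
   * Moves a store file from the live data layout to the parallel archive layout,
   * e.g. .../data/default/T/region/B/file -> .../archive/data/default/T/region/B/file.
   * Assumes storeFile lives under <rootDir>/data/. Illustrative only.
   */
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path dataRoot = new Path(rootDir, "data");
    Path archiveRoot = new Path(rootDir, "archive/data");
    // Relative path of the file under data/, reused verbatim under archive/data/.
    String relative = storeFile.toUri().getPath()
        .substring(dataRoot.toUri().getPath().length() + 1);
    Path target = new Path(archiveRoot, relative);
    fs.mkdirs(target.getParent());            // make sure the archive family dir exists
    if (!fs.rename(storeFile, target)) {      // rename == move within the same filesystem
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Values modelled on the log above; adjust to the cluster under test.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44329"), conf);
    Path rootDir = new Path("/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e");
    Path storeFile = new Path(rootDir,
        "data/default/TestAcidGuarantees/8dbe93101666996632a420c7c97b42e1/B/c5038059ae674a30b6207ee99d4ac738");
    archiveStoreFile(fs, rootDir, storeFile);
  }
}
```

Within a single HDFS namespace a rename is a metadata-only operation, which is why archiving hundreds of compacted files, as in the records above, takes on the order of a millisecond each.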
2024-11-28T07:22:20,168 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778540168"}]},"ts":"9223372036854775807"} 2024-11-28T07:22:20,171 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:22:20,173 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=71, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 76 msec 2024-11-28T07:22:20,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T07:22:20,200 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-28T07:22:20,212 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=238 (was 241), OpenFileDescriptor=451 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=438 (was 339) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5150 (was 4996) - AvailableMemoryMB LEAK? - 2024-11-28T07:22:20,223 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=438, ProcessCount=11, AvailableMemoryMB=5149 2024-11-28T07:22:20,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
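Partway through the record above the DELETE operation for procId 71 completes, closing out the pair of client calls that drove this whole sequence ("Operation: DISABLE ... procId: 67 completed" earlier, "Operation: DELETE ... procId: 71 completed" here) before the next test, testGetAtomicity, begins. A minimal sketch of issuing the same pair of calls through the public HBase Admin API; the connection setup is assumed and the test's own helper code is not reproduced.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // server side: DisableTableProcedure unassigns regions, marks DISABLED
        }
        admin.deleteTable(table);    // server side: DeleteTableProcedure archives region dirs, cleans hbase:meta
      }
    }
  }
}
```

Both calls block until the corresponding master procedure finishes, which is what the repeated "Checking to see if procedure is done pid=..." records in the log correspond to on the server side.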
2024-11-28T07:22:20,225 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:22:20,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:20,227 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:22:20,227 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:20,227 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 72 2024-11-28T07:22:20,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:20,228 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:22:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742106_1282 (size=960) 2024-11-28T07:22:20,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:20,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:20,636 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:22:20,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742107_1283 (size=53) 2024-11-28T07:22:20,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:21,043 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:22:21,044 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 936414ebf397eefac328f959953a4d8e, disabling compactions & flushes 2024-11-28T07:22:21,044 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:21,044 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:21,044 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. after waiting 0 ms 2024-11-28T07:22:21,044 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:21,044 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
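
The create request logged at 07:22:20,225 describes a table with three column families A, B and C, one version each, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A minimal sketch of an equivalent createTable call, assuming the HBase 2.x Admin API (class and method names below are illustrative; the attribute values come from the logged descriptor, and the remaining family attributes in the log are HBase defaults):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidGuaranteesTableSketch {
    // VERSIONS => '1'; the other per-family attributes in the log are defaults.
    private static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)
            .build();
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.createTable(
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level METADATA attribute from the logged descriptor.
                    .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                    .setColumnFamily(family("A"))
                    .setColumnFamily(family("B"))
                    .setColumnFamily(family("C"))
                    .build());
        }
    }
}
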
2024-11-28T07:22:21,044 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:21,045 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:22:21,045 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778541045"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778541045"}]},"ts":"1732778541045"} 2024-11-28T07:22:21,046 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:22:21,047 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:22:21,047 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778541047"}]},"ts":"1732778541047"} 2024-11-28T07:22:21,048 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:22:21,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, ASSIGN}] 2024-11-28T07:22:21,052 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, ASSIGN 2024-11-28T07:22:21,053 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:22:21,204 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=936414ebf397eefac328f959953a4d8e, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:21,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; OpenRegionProcedure 936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:21,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:21,356 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:21,359 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:21,359 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7285): Opening region: {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:22:21,360 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,360 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:22:21,360 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7327): checking encryption for 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,360 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7330): checking classloading for 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,361 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,363 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:21,363 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 936414ebf397eefac328f959953a4d8e columnFamilyName A 2024-11-28T07:22:21,363 DEBUG [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:21,363 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(327): Store=936414ebf397eefac328f959953a4d8e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:21,363 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,365 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:21,365 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 936414ebf397eefac328f959953a4d8e columnFamilyName B 2024-11-28T07:22:21,365 DEBUG [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:21,365 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(327): Store=936414ebf397eefac328f959953a4d8e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:21,365 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,366 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:21,366 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 936414ebf397eefac328f959953a4d8e columnFamilyName C 2024-11-28T07:22:21,366 DEBUG [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:21,367 INFO [StoreOpener-936414ebf397eefac328f959953a4d8e-1 {}] regionserver.HStore(327): Store=936414ebf397eefac328f959953a4d8e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:21,367 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:21,367 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,368 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,369 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:22:21,370 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1085): writing seq id for 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:21,372 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:22:21,372 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1102): Opened 936414ebf397eefac328f959953a4d8e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67875476, jitterRate=0.01142340898513794}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:22:21,373 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1001): Region open journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:21,374 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., pid=74, masterSystemTime=1732778541356 2024-11-28T07:22:21,375 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:21,375 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
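
The store-open records above show each of A, B and C backed by a CompactingMemStore with the BASIC compactor and a 2.00 MB in-memory flush threshold, driven by the table-level compacting-memstore attribute. A minimal sketch, assuming the HBase 2.x API, of the per-column-family way to request the same behaviour (names are illustrative; this is an alternative to the table-level attribute shown earlier, not what this test itself did):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicInMemoryCompactionSketch {
    public static void main(String[] args) {
        // Requests the BASIC in-memory compaction policy directly on the column
        // family descriptor; the region server then opens the store with a
        // CompactingMemStore (compactor=BASIC), as in the records above.
        TableDescriptor td =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                        .setMaxVersions(1)
                        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                        .build())
                .build();
        System.out.println(td);
    }
}
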
2024-11-28T07:22:21,375 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=936414ebf397eefac328f959953a4d8e, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:21,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-28T07:22:21,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; OpenRegionProcedure 936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 in 171 msec 2024-11-28T07:22:21,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=72 2024-11-28T07:22:21,379 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=72, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, ASSIGN in 325 msec 2024-11-28T07:22:21,379 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:22:21,379 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778541379"}]},"ts":"1732778541379"} 2024-11-28T07:22:21,380 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:22:21,383 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:22:21,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1580 sec 2024-11-28T07:22:22,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-11-28T07:22:22,332 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 72 completed 2024-11-28T07:22:22,334 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-11-28T07:22:22,337 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,339 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,340 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,341 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:22:22,342 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57106, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:22:22,344 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-11-28T07:22:22,347 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,348 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-11-28T07:22:22,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,353 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-11-28T07:22:22,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,357 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-11-28T07:22:22,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,361 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-11-28T07:22:22,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,364 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-11-28T07:22:22,367 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-11-28T07:22:22,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,371 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-11-28T07:22:22,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,375 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-11-28T07:22:22,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,379 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-11-28T07:22:22,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:22,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:22,384 DEBUG [hconnection-0x15953560-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-28T07:22:22,386 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,386 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:22,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:22,387 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:22,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:22,390 DEBUG [hconnection-0x63ae2784-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,390 DEBUG [hconnection-0x3604c9c7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,390 DEBUG [hconnection-0x5b26abf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,391 DEBUG [hconnection-0x47571b80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,391 DEBUG [hconnection-0x2a871bba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,391 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,391 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,391 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49606, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,392 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,392 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,395 DEBUG [hconnection-0x391a311d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,395 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,396 DEBUG [hconnection-0x30ec4335-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,396 DEBUG [hconnection-0x3fdb6817-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:22,397 DEBUG [hconnection-0x48069a48-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-28T07:22:22,397 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49632, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,397 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,398 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:22,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:22,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:22:22,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:22,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:22,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:22,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:22,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:22,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:22,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778602420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778602420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778602422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778602422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778602425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/050fc5c35e9049aabc8d4566de9cd0b2 is 50, key is test_row_0/A:col10/1732778542399/Put/seqid=0 2024-11-28T07:22:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742108_1284 (size=12001) 2024-11-28T07:22:22,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:22,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778602526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778602526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778602527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778602527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778602530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:22,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:22,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:22,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:22,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:22,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:22,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:22,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778602728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778602729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778602730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778602732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:22,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778602732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:22,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:22,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:22,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:22,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/050fc5c35e9049aabc8d4566de9cd0b2 2024-11-28T07:22:22,900 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/52b305f5d93547b7beca575e280b2ab7 is 50, key is test_row_0/B:col10/1732778542399/Put/seqid=0 2024-11-28T07:22:22,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742109_1285 (size=12001) 2024-11-28T07:22:22,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:22,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:22,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:22,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:22,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:22,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:22,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778603032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778603035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778603035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778603035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778603047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:23,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:23,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:23,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:23,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:23,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,305 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:23,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:23,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:23,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:23,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:23,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:23,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/52b305f5d93547b7beca575e280b2ab7 2024-11-28T07:22:23,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e55c52651dfd445fa329ff5d97d9c70f is 50, key is test_row_0/C:col10/1732778542399/Put/seqid=0 2024-11-28T07:22:23,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742110_1286 (size=12001) 2024-11-28T07:22:23,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e55c52651dfd445fa329ff5d97d9c70f 2024-11-28T07:22:23,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/050fc5c35e9049aabc8d4566de9cd0b2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2 2024-11-28T07:22:23,447 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2, entries=150, sequenceid=14, filesize=11.7 K 2024-11-28T07:22:23,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/52b305f5d93547b7beca575e280b2ab7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7 2024-11-28T07:22:23,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7, entries=150, sequenceid=14, filesize=11.7 K 2024-11-28T07:22:23,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e55c52651dfd445fa329ff5d97d9c70f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f 2024-11-28T07:22:23,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f, entries=150, sequenceid=14, filesize=11.7 K 2024-11-28T07:22:23,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:23,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:23,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 936414ebf397eefac328f959953a4d8e in 1061ms, sequenceid=14, compaction requested=false 2024-11-28T07:22:23,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:23,461 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/df65d557a20745fe849da1ed1513e7c2 is 50, key is test_row_0/A:col10/1732778542421/Put/seqid=0 2024-11-28T07:22:23,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:23,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742111_1287 (size=12001) 2024-11-28T07:22:23,512 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/df65d557a20745fe849da1ed1513e7c2 2024-11-28T07:22:23,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0ec23887574c461eb7bed12a03f94789 is 50, key is test_row_0/B:col10/1732778542421/Put/seqid=0 
2024-11-28T07:22:23,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:23,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:23,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778603561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778603562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778603566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778603566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778603567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742112_1288 (size=12001) 2024-11-28T07:22:23,622 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0ec23887574c461eb7bed12a03f94789 2024-11-28T07:22:23,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1c9062eb85ad4400a1fb6b32f4a28a69 is 50, key is test_row_0/C:col10/1732778542421/Put/seqid=0 2024-11-28T07:22:23,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778603668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778603678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778603687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778603689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778603693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742113_1289 (size=12001) 2024-11-28T07:22:23,707 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1c9062eb85ad4400a1fb6b32f4a28a69 2024-11-28T07:22:23,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/df65d557a20745fe849da1ed1513e7c2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2 2024-11-28T07:22:23,718 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T07:22:23,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0ec23887574c461eb7bed12a03f94789 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789 2024-11-28T07:22:23,732 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T07:22:23,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1c9062eb85ad4400a1fb6b32f4a28a69 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69 2024-11-28T07:22:23,738 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T07:22:23,740 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 936414ebf397eefac328f959953a4d8e in 278ms, sequenceid=38, compaction requested=false 2024-11-28T07:22:23,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:23,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
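[Editor's note] The flush that just completed ran as subprocedure pid=76 of FlushTableProcedure pid=75; the records that follow show both procedures finishing, and the client later logs "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed". A minimal sketch of how a client issues such a table flush through the standard Admin API; the connection setup is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // In this build the flush is driven by a master-side
            // FlushTableProcedure with per-region subprocedures, as in the
            // pid=75/76 entries above; the client waits for completion.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```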
2024-11-28T07:22:23,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-28T07:22:23,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-28T07:22:23,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-28T07:22:23,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3540 sec 2024-11-28T07:22:23,746 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.3610 sec 2024-11-28T07:22:23,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:22:23,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:23,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:23,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 is 50, key is test_row_0/A:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:23,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742114_1290 (size=16681) 2024-11-28T07:22:23,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778603951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778603958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778603962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778603962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:23,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778603964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778604064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778604068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778604070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778604071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778604075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778604272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778604274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778604275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778604277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778604281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 2024-11-28T07:22:24,381 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/af9470750bfd4fee851764640db4b708 is 50, key is test_row_0/B:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742115_1291 (size=12001) 2024-11-28T07:22:24,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/af9470750bfd4fee851764640db4b708 2024-11-28T07:22:24,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1a7ed76a8c89442ea7fbde891539827d is 50, key is test_row_0/C:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:24,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T07:22:24,492 INFO [Thread-1293 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-28T07:22:24,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:24,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-28T07:22:24,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:24,496 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:24,497 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:24,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:24,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742116_1292 (size=12001) 2024-11-28T07:22:24,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1a7ed76a8c89442ea7fbde891539827d 2024-11-28T07:22:24,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 2024-11-28T07:22:24,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4, entries=250, sequenceid=53, filesize=16.3 K 2024-11-28T07:22:24,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/af9470750bfd4fee851764640db4b708 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708 2024-11-28T07:22:24,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T07:22:24,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1a7ed76a8c89442ea7fbde891539827d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d 2024-11-28T07:22:24,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T07:22:24,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778604579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for 936414ebf397eefac328f959953a4d8e in 700ms, sequenceid=53, compaction requested=true 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:24,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:24,588 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:24,588 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:24,591 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:24,591 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-28T07:22:24,591 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:24,591 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:24,591 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,591 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,591 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=35.2 K 2024-11-28T07:22:24,591 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=39.7 K 2024-11-28T07:22:24,593 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b305f5d93547b7beca575e280b2ab7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778542399 2024-11-28T07:22:24,593 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 050fc5c35e9049aabc8d4566de9cd0b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778542399 2024-11-28T07:22:24,593 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ec23887574c461eb7bed12a03f94789, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732778542420 2024-11-28T07:22:24,594 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting df65d557a20745fe849da1ed1513e7c2, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732778542420 2024-11-28T07:22:24,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:24,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:24,595 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting af9470750bfd4fee851764640db4b708, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543564 2024-11-28T07:22:24,595 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd5afc7c1c3c4beba8f5e94fe2a991f4, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543560 2024-11-28T07:22:24,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:24,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778604615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778604623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778604627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778604630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf2d566c458548beaf6d3e06b3f161a4 is 50, key is test_row_0/A:col10/1732778543946/Put/seqid=0 2024-11-28T07:22:24,649 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#242 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:24,650 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/95af2acc507a4469813f13eb933c6ab8 is 50, key is test_row_0/A:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:24,650 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:24,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:24,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:24,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
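The repeated RegionTooBusyException entries above are thrown by HRegion.checkResources() when a region's memstore grows past its blocking threshold, which HBase derives from the memstore flush size multiplied by the block multiplier. A minimal illustrative sketch follows (not part of this test run); the stock defaults are shown, and the 512.0 K limit reported in this log presumably reflects a deliberately small test-level flush size rather than those defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: how the "Over memstore limit=..." threshold is derived.
// Both keys are real HBase configuration properties; the defaults below are the stock ones.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;
    // Writes to the region are rejected with RegionTooBusyException once its
    // memstore footprint exceeds blockingLimit, until a flush brings it back down.
    System.out.println("Blocking memstore limit ~" + blockingLimit + " bytes");
  }
}
```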
2024-11-28T07:22:24,651 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,656 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:24,657 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/9bb86575e6514d0ba1ae67769b1114c0 is 50, key is test_row_0/B:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:24,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742118_1294 (size=12104) 2024-11-28T07:22:24,715 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/95af2acc507a4469813f13eb933c6ab8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/95af2acc507a4469813f13eb933c6ab8 2024-11-28T07:22:24,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742117_1293 (size=14341) 2024-11-28T07:22:24,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf2d566c458548beaf6d3e06b3f161a4 2024-11-28T07:22:24,723 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 95af2acc507a4469813f13eb933c6ab8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
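The "average throughput ... total limit is 50.00 MB/second" lines come from the PressureAwareThroughputController that paces compaction I/O on the region server. A minimal sketch of the server-side bounds behind that limit, with the concrete byte values chosen only for illustration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the throughput controller interpolates its limit between a
// lower and a higher bound depending on flush/compaction pressure. With little
// pressure it sits near the lower bound, which is consistent with the 50 MB/s
// figure reported in this log.
public class CompactionThroughputTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("Compaction throughput bounded between 50 MB/s and 100 MB/s");
  }
}
```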
2024-11-28T07:22:24,723 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:24,723 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=13, startTime=1732778544588; duration=0sec 2024-11-28T07:22:24,723 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:24,723 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:24,723 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:24,724 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:24,724 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:24,724 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,725 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=35.2 K 2024-11-28T07:22:24,725 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e55c52651dfd445fa329ff5d97d9c70f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732778542399 2024-11-28T07:22:24,726 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c9062eb85ad4400a1fb6b32f4a28a69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732778542420 2024-11-28T07:22:24,726 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a7ed76a8c89442ea7fbde891539827d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543564 2024-11-28T07:22:24,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778604730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778604738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778604738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778604738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742119_1295 (size=12104) 2024-11-28T07:22:24,766 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:24,767 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/97521e74d92d41e9991506add61cf2dd is 50, key is test_row_0/C:col10/1732778543885/Put/seqid=0 2024-11-28T07:22:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:24,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/8f91965c4ac3450695de47de86e7b669 is 50, key is test_row_0/B:col10/1732778543946/Put/seqid=0 2024-11-28T07:22:24,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:24,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:24,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
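From the client's side, the Mutate calls being rejected above are not fatal: RegionTooBusyException is retriable, and the HBase client keeps retrying the put within its configured retry and operation-timeout budget. A minimal, hypothetical writer against the same table and column family (row, family, and qualifier names are taken from the log; the timeout values are invented for illustration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Give the client more headroom to ride out RegionTooBusyException; the
    // concrete numbers here are examples, not values used by the test.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);                 // ms between retries
    conf.setLong("hbase.client.operation.timeout", 120_000); // overall budget per op

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried internally while the region reports "too busy", up to the budget above.
      table.put(put);
    }
  }
}
```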
2024-11-28T07:22:24,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:24,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
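The pid=78 failures above are the expected retry path of the table flush started as pid=77: the region is already being flushed by MemStoreFlusher, so FlushRegionCallable reports "Unable to complete flush" and the master re-dispatches the sub-procedure until the in-flight flush finishes. A minimal sketch of the client call that drives this code path (table name taken from the log; error handling omitted):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // In this build the flush is serviced by the master-side FlushTableProcedure
      // seen in the log, which fans out FlushRegionProcedure per region and retries
      // regions that are still mid-flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```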
2024-11-28T07:22:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742120_1296 (size=12104) 2024-11-28T07:22:24,847 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/97521e74d92d41e9991506add61cf2dd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/97521e74d92d41e9991506add61cf2dd 2024-11-28T07:22:24,854 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 97521e74d92d41e9991506add61cf2dd(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:24,854 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:24,854 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=13, startTime=1732778544588; duration=0sec 2024-11-28T07:22:24,854 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:24,854 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:24,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742121_1297 (size=12001) 2024-11-28T07:22:24,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778604938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778604941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778604941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:24,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778604943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:24,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:24,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:24,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:24,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:25,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778605091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:25,115 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:25,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:25,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:25,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:25,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:25,170 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/9bb86575e6514d0ba1ae67769b1114c0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/9bb86575e6514d0ba1ae67769b1114c0 2024-11-28T07:22:25,178 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 9bb86575e6514d0ba1ae67769b1114c0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:25,178 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:25,178 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=13, startTime=1732778544588; duration=0sec 2024-11-28T07:22:25,178 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:25,178 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:25,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778605247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778605247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778605252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778605257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,269 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:25,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:25,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:25,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:25,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:25,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:25,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/8f91965c4ac3450695de47de86e7b669 2024-11-28T07:22:25,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/93ab9e894d7d477e9bce6a057138a80b is 50, key is test_row_0/C:col10/1732778543946/Put/seqid=0 2024-11-28T07:22:25,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742122_1298 (size=12001) 2024-11-28T07:22:25,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/93ab9e894d7d477e9bce6a057138a80b 2024-11-28T07:22:25,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf2d566c458548beaf6d3e06b3f161a4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4 2024-11-28T07:22:25,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4, entries=200, sequenceid=77, filesize=14.0 K 2024-11-28T07:22:25,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/8f91965c4ac3450695de47de86e7b669 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669 2024-11-28T07:22:25,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669, entries=150, sequenceid=77, filesize=11.7 K 2024-11-28T07:22:25,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/93ab9e894d7d477e9bce6a057138a80b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b 
2024-11-28T07:22:25,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b, entries=150, sequenceid=77, filesize=11.7 K 2024-11-28T07:22:25,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 936414ebf397eefac328f959953a4d8e in 781ms, sequenceid=77, compaction requested=false 2024-11-28T07:22:25,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:25,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:25,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T07:22:25,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:25,427 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:25,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:25,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:25,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:25,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/c83baabca20049ef9491898ad84c3194 is 50, key is test_row_0/A:col10/1732778544622/Put/seqid=0 2024-11-28T07:22:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742123_1299 (size=12001) 2024-11-28T07:22:25,489 INFO 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/c83baabca20049ef9491898ad84c3194 2024-11-28T07:22:25,497 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:22:25,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/11f28f92da1a4d49845cd70c7ac65f39 is 50, key is test_row_0/B:col10/1732778544622/Put/seqid=0 2024-11-28T07:22:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742124_1300 (size=12001) 2024-11-28T07:22:25,560 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/11f28f92da1a4d49845cd70c7ac65f39 2024-11-28T07:22:25,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/4fe310e09be54459871cc58a3a53bc66 is 50, key is test_row_0/C:col10/1732778544622/Put/seqid=0 2024-11-28T07:22:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:25,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742125_1301 (size=12001) 2024-11-28T07:22:25,648 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/4fe310e09be54459871cc58a3a53bc66 2024-11-28T07:22:25,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/c83baabca20049ef9491898ad84c3194 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194 2024-11-28T07:22:25,666 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T07:22:25,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/11f28f92da1a4d49845cd70c7ac65f39 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39 2024-11-28T07:22:25,679 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T07:22:25,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/4fe310e09be54459871cc58a3a53bc66 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66 2024-11-28T07:22:25,692 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T07:22:25,694 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 936414ebf397eefac328f959953a4d8e in 267ms, sequenceid=93, compaction requested=true 2024-11-28T07:22:25,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:25,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:25,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-28T07:22:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-28T07:22:25,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-28T07:22:25,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1990 sec 2024-11-28T07:22:25,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.2050 sec 2024-11-28T07:22:25,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:22:25,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:25,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:25,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f39c237af8314fc3b7b390436c64e54e is 50, key is test_row_0/A:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:25,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742126_1302 (size=12001) 2024-11-28T07:22:25,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778605833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778605838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778605838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778605851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778605941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778605950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778605953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:25,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:25,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778605960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778606101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778606147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778606159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778606165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778606175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f39c237af8314fc3b7b390436c64e54e 2024-11-28T07:22:26,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4bc6e7e6ec264ed4aa816b7b5569d490 is 50, key is test_row_0/B:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:26,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742127_1303 (size=12001) 2024-11-28T07:22:26,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778606452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778606472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778606474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:26,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778606480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:26,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T07:22:26,600 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-28T07:22:26,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:26,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-28T07:22:26,606 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:26,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T07:22:26,607 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:26,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:26,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4bc6e7e6ec264ed4aa816b7b5569d490 2024-11-28T07:22:26,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/acbe2b248f2e43bf820d212d45af7a6b is 50, key is test_row_0/C:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:26,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742128_1304 (size=12001) 
2024-11-28T07:22:26,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/acbe2b248f2e43bf820d212d45af7a6b 2024-11-28T07:22:26,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T07:22:26,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f39c237af8314fc3b7b390436c64e54e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e 2024-11-28T07:22:26,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e, entries=150, sequenceid=105, filesize=11.7 K 2024-11-28T07:22:26,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4bc6e7e6ec264ed4aa816b7b5569d490 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490 2024-11-28T07:22:26,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490, entries=150, sequenceid=105, filesize=11.7 K 2024-11-28T07:22:26,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/acbe2b248f2e43bf820d212d45af7a6b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b 2024-11-28T07:22:26,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b, entries=150, sequenceid=105, filesize=11.7 K 2024-11-28T07:22:26,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 936414ebf397eefac328f959953a4d8e in 973ms, sequenceid=105, compaction requested=true 2024-11-28T07:22:26,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:26,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T07:22:26,751 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:26,751 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:26,753 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:26,753 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:26,753 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:26,753 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:26,753 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/95af2acc507a4469813f13eb933c6ab8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=49.3 K 2024-11-28T07:22:26,753 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:26,753 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:26,753 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/97521e74d92d41e9991506add61cf2dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=47.0 K 2024-11-28T07:22:26,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95af2acc507a4469813f13eb933c6ab8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543564 2024-11-28T07:22:26,754 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 97521e74d92d41e9991506add61cf2dd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543564 2024-11-28T07:22:26,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf2d566c458548beaf6d3e06b3f161a4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, 
earliestPutTs=1732778543946 2024-11-28T07:22:26,754 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 93ab9e894d7d477e9bce6a057138a80b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732778543946 2024-11-28T07:22:26,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c83baabca20049ef9491898ad84c3194, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732778544614 2024-11-28T07:22:26,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fe310e09be54459871cc58a3a53bc66, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732778544614 2024-11-28T07:22:26,755 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f39c237af8314fc3b7b390436c64e54e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:26,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting acbe2b248f2e43bf820d212d45af7a6b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:26,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:26,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T07:22:26,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:26,760 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:26,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:26,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/e46a4f86b9254e8db51a4c3de457a434 is 50, key is test_row_0/A:col10/1732778545827/Put/seqid=0 2024-11-28T07:22:26,795 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#254 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:26,795 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/a27b48fc339e464daa68c9ed18310d6a is 50, key is test_row_0/A:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:26,807 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:26,807 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/88a4f09c063d4794b11d6e36f962bb9d is 50, key is test_row_0/C:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:26,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742129_1305 (size=12051) 2024-11-28T07:22:26,862 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/e46a4f86b9254e8db51a4c3de457a434 2024-11-28T07:22:26,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T07:22:26,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742130_1306 (size=12241) 2024-11-28T07:22:26,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742131_1307 (size=12241) 2024-11-28T07:22:26,963 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/88a4f09c063d4794b11d6e36f962bb9d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/88a4f09c063d4794b11d6e36f962bb9d 2024-11-28T07:22:26,969 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 88a4f09c063d4794b11d6e36f962bb9d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:26,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:26,969 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=12, startTime=1732778546751; duration=0sec 2024-11-28T07:22:26,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:26,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:26,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:26,971 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:26,971 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:26,971 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:26,971 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/9bb86575e6514d0ba1ae67769b1114c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=47.0 K 2024-11-28T07:22:26,972 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bb86575e6514d0ba1ae67769b1114c0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778543564 2024-11-28T07:22:26,972 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f91965c4ac3450695de47de86e7b669, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1732778543946 2024-11-28T07:22:26,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:26,973 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:26,973 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 11f28f92da1a4d49845cd70c7ac65f39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732778544614 2024-11-28T07:22:26,973 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bc6e7e6ec264ed4aa816b7b5569d490, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:26,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0813f6559ad84ce088c661073541e348 is 50, key is test_row_0/B:col10/1732778545827/Put/seqid=0 2024-11-28T07:22:27,001 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#257 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:27,002 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/46f0f9e3528a44609d384e7417d5fd71 is 50, key is test_row_0/B:col10/1732778545772/Put/seqid=0 2024-11-28T07:22:27,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778606991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778606994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778606996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778607004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742132_1308 (size=12051) 2024-11-28T07:22:27,014 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0813f6559ad84ce088c661073541e348 2024-11-28T07:22:27,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba3bea3044f8460db7a3b26a002be71b is 50, key is test_row_0/C:col10/1732778545827/Put/seqid=0 2024-11-28T07:22:27,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742133_1309 (size=12241) 2024-11-28T07:22:27,067 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/46f0f9e3528a44609d384e7417d5fd71 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/46f0f9e3528a44609d384e7417d5fd71 2024-11-28T07:22:27,072 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 46f0f9e3528a44609d384e7417d5fd71(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:27,072 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:27,072 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=12, startTime=1732778546751; duration=0sec 2024-11-28T07:22:27,072 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:27,072 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:27,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742134_1310 (size=12051) 2024-11-28T07:22:27,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778607104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778607108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778607109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T07:22:27,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778607310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778607312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778607312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,349 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/a27b48fc339e464daa68c9ed18310d6a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a27b48fc339e464daa68c9ed18310d6a 2024-11-28T07:22:27,363 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into a27b48fc339e464daa68c9ed18310d6a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
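The 512.0 K figure in these RegionTooBusyException entries is the region's blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; HRegion.checkResources rejects writes until flushes bring the memstore back under that limit. The snippet below only illustrates the arithmetic with assumed values (a 128 KB flush size and a multiplier of 4, which would produce the 512 KB limit seen here); the test's real configuration is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration only; they are not read from this test.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB per region
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;

        // 128 KB * 4 = 512 KB, the "Over memstore limit=512.0 K" value in the
        // RegionTooBusyException entries: puts are rejected above this size until a
        // flush shrinks the region's memstore again.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
    }
}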
2024-11-28T07:22:27,363 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:27,363 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=12, startTime=1732778546750; duration=0sec 2024-11-28T07:22:27,364 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:27,364 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:27,511 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba3bea3044f8460db7a3b26a002be71b 2024-11-28T07:22:27,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/e46a4f86b9254e8db51a4c3de457a434 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434 2024-11-28T07:22:27,523 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434, entries=150, sequenceid=130, filesize=11.8 K 2024-11-28T07:22:27,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/0813f6559ad84ce088c661073541e348 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348 2024-11-28T07:22:27,536 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348, entries=150, sequenceid=130, filesize=11.8 K 2024-11-28T07:22:27,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba3bea3044f8460db7a3b26a002be71b 
as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b 2024-11-28T07:22:27,542 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b, entries=150, sequenceid=130, filesize=11.8 K 2024-11-28T07:22:27,543 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 936414ebf397eefac328f959953a4d8e in 783ms, sequenceid=130, compaction requested=false 2024-11-28T07:22:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:27,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:27,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-28T07:22:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-28T07:22:27,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-28T07:22:27,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 938 msec 2024-11-28T07:22:27,549 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 944 msec 2024-11-28T07:22:27,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:27,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:27,630 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,637 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/53416b46bc5f4b60b4edec7cb0adaf99 is 50, key is test_row_0/A:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:27,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742135_1311 (size=12151) 2024-11-28T07:22:27,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/53416b46bc5f4b60b4edec7cb0adaf99 2024-11-28T07:22:27,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/479a027456f0420c86b32d66fc4c22dd is 50, key is test_row_0/B:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T07:22:27,712 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-28T07:22:27,714 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-28T07:22:27,716 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T07:22:27,717 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:27,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:27,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742136_1312 (size=12151) 2024-11-28T07:22:27,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/479a027456f0420c86b32d66fc4c22dd 2024-11-28T07:22:27,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778607756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778607758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778607758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/904195fdba864488bee5a401b7e4a331 is 50, key is test_row_0/C:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:27,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T07:22:27,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742137_1313 (size=12151) 2024-11-28T07:22:27,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=146 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/904195fdba864488bee5a401b7e4a331 2024-11-28T07:22:27,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/53416b46bc5f4b60b4edec7cb0adaf99 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99 2024-11-28T07:22:27,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99, entries=150, sequenceid=146, filesize=11.9 K 2024-11-28T07:22:27,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/479a027456f0420c86b32d66fc4c22dd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd 2024-11-28T07:22:27,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd, entries=150, sequenceid=146, filesize=11.9 K 2024-11-28T07:22:27,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/904195fdba864488bee5a401b7e4a331 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331 2024-11-28T07:22:27,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331, entries=150, sequenceid=146, filesize=11.9 K 2024-11-28T07:22:27,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 936414ebf397eefac328f959953a4d8e in 240ms, sequenceid=146, compaction requested=true 2024-11-28T07:22:27,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:27,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:27,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:27,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:27,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:27,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:27,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:27,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:27,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor 
compaction (all files) 2024-11-28T07:22:27,870 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:27,870 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a27b48fc339e464daa68c9ed18310d6a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=35.6 K 2024-11-28T07:22:27,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:27,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:27,871 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:22:27,871 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a27b48fc339e464daa68c9ed18310d6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:27,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction 
(all files) 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:27,871 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:27,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:27,871 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/46f0f9e3528a44609d384e7417d5fd71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=35.6 K 2024-11-28T07:22:27,872 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e46a4f86b9254e8db51a4c3de457a434, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778545827 2024-11-28T07:22:27,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 46f0f9e3528a44609d384e7417d5fd71, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:27,872 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53416b46bc5f4b60b4edec7cb0adaf99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:27,873 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0813f6559ad84ce088c661073541e348, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778545827 2024-11-28T07:22:27,873 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 479a027456f0420c86b32d66fc4c22dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:27,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0474204bf8f3474c8fbccef3b846a834 is 50, key is test_row_0/A:col10/1732778547708/Put/seqid=0 2024-11-28T07:22:27,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:27,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:27,899 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#263 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:27,900 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/71dce6f8a7dd4a7bac403035fe5a2f4f is 50, key is test_row_0/A:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:27,909 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#264 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:27,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/186c9762df804f8ca6125d8cad8a3d98 is 50, key is test_row_0/B:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:27,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778607927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778607928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:27,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778607934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742138_1314 (size=12151) 2024-11-28T07:22:27,939 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0474204bf8f3474c8fbccef3b846a834 2024-11-28T07:22:27,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742140_1316 (size=12493) 2024-11-28T07:22:27,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/20ee29c561b4466e88c8ced9fbf3a950 is 50, key is test_row_0/B:col10/1732778547708/Put/seqid=0 2024-11-28T07:22:27,973 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/186c9762df804f8ca6125d8cad8a3d98 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/186c9762df804f8ca6125d8cad8a3d98 2024-11-28T07:22:27,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742139_1315 (size=12493) 2024-11-28T07:22:27,979 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 186c9762df804f8ca6125d8cad8a3d98(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:27,979 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:27,979 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=13, startTime=1732778547868; duration=0sec 2024-11-28T07:22:27,979 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:27,979 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:27,979 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:27,990 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:27,990 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:27,990 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
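The FlushTableProcedure and FlushRegionProcedure entries (pid=79/80 completing, pid=81/82 being dispatched) and the compaction selections above are the server-side halves of operations a client can request through the Admin API; the log shows the flush being issued by the jenkins client via HBaseAdmin. A minimal sketch, assuming a reachable cluster and reusing the table and family names from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompact {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");

            // Client-side counterpart of the FlushTableProcedure entries: the master
            // stores the procedure and fans a FlushRegionProcedure out to each region,
            // as seen for pid=79/80 and pid=81/82 above.
            admin.flush(table);

            // Request a compaction of one store; the region server's compaction policy
            // (ExploringCompactionPolicy in these entries) picks the files to merge.
            admin.compact(table, Bytes.toBytes("C"));
        }
    }
}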
2024-11-28T07:22:27,990 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/88a4f09c063d4794b11d6e36f962bb9d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=35.6 K 2024-11-28T07:22:27,991 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 88a4f09c063d4794b11d6e36f962bb9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732778545771 2024-11-28T07:22:27,992 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ba3bea3044f8460db7a3b26a002be71b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778545827 2024-11-28T07:22:27,992 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 904195fdba864488bee5a401b7e4a331, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:28,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742141_1317 (size=12151) 2024-11-28T07:22:28,003 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/20ee29c561b4466e88c8ced9fbf3a950 2024-11-28T07:22:28,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778608013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T07:22:28,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778608037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778608037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778608038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,045 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:28,046 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/c6b4fc16ff744659815ef95ecaf16216 is 50, key is test_row_0/C:col10/1732778547623/Put/seqid=0 2024-11-28T07:22:28,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b4405a20657c4b96a76780fd80935a44 is 50, key is test_row_0/C:col10/1732778547708/Put/seqid=0 2024-11-28T07:22:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742142_1318 (size=12493) 2024-11-28T07:22:28,115 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/c6b4fc16ff744659815ef95ecaf16216 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/c6b4fc16ff744659815ef95ecaf16216 2024-11-28T07:22:28,121 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into c6b4fc16ff744659815ef95ecaf16216(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:28,121 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:28,121 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=13, startTime=1732778547869; duration=0sec 2024-11-28T07:22:28,121 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:28,121 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:28,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778608122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,136 DEBUG [Thread-1287 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:28,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742143_1319 (size=12151) 2024-11-28T07:22:28,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778608241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778608242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778608243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T07:22:28,393 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/71dce6f8a7dd4a7bac403035fe5a2f4f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/71dce6f8a7dd4a7bac403035fe5a2f4f 2024-11-28T07:22:28,399 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 71dce6f8a7dd4a7bac403035fe5a2f4f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:28,399 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:28,399 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=13, startTime=1732778547868; duration=0sec 2024-11-28T07:22:28,399 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:28,399 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:28,545 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b4405a20657c4b96a76780fd80935a44 2024-11-28T07:22:28,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778608546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0474204bf8f3474c8fbccef3b846a834 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834 2024-11-28T07:22:28,557 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834, entries=150, sequenceid=169, filesize=11.9 K 2024-11-28T07:22:28,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/20ee29c561b4466e88c8ced9fbf3a950 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950 2024-11-28T07:22:28,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778608557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,564 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950, entries=150, sequenceid=169, filesize=11.9 K 2024-11-28T07:22:28,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b4405a20657c4b96a76780fd80935a44 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44 2024-11-28T07:22:28,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:28,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778608568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:28,573 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44, entries=150, sequenceid=169, filesize=11.9 K 2024-11-28T07:22:28,591 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 936414ebf397eefac328f959953a4d8e in 720ms, sequenceid=169, compaction requested=false 2024-11-28T07:22:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
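
The RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore sits over its blocking limit; the writer threads ride this out through the client's RpcRetryingCallerImpl ("tries=6, retries=16" in the entry above). Below is a minimal, hypothetical client-side sketch of the same pattern. The table, row, family, and qualifier names are taken from the log, but the retry settings, the value written, and the extra application-level backoff are illustrative assumptions, not the test tool's actual code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Let the built-in retrying caller absorb transient RegionTooBusyExceptions,
    // roughly matching the retries=16 seen in the log above.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setInt("hbase.client.pause", 100); // base backoff between retries, in ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put); // RpcRetryingCallerImpl retries transient failures internally
      } catch (IOException e) {
        // If every retry hit "Over memstore limit", the server-side
        // RegionTooBusyException surfaces here (typically wrapped in a
        // retries-exhausted exception). Back off at the application level
        // and try once more before giving up.
        Thread.sleep(1000L);
        table.put(put);
      }
    }
  }
}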
2024-11-28T07:22:28,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-28T07:22:28,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-28T07:22:28,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-28T07:22:28,595 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 876 msec 2024-11-28T07:22:28,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 882 msec 2024-11-28T07:22:28,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T07:22:28,821 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-28T07:22:28,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:28,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-28T07:22:28,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T07:22:28,825 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:28,827 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:28,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T07:22:28,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:28,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T07:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
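
The pid=81 procedure finishing and the pid=83 submission that immediately follows correspond to client-driven table flushes: the master runs a FlushTableProcedure (with one FlushRegionProcedure per region), and the client polls "Checking to see if procedure is done" until it completes. A hedged sketch of issuing such a flush through the Admin API; connection setup is assumed as in the previous sketch.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Synchronous flush: the call returns once the master-side flush
      // procedure completes, matching the procId polling seen in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}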
2024-11-28T07:22:28,983 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:28,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:28,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0811e691addf4e28b23d5d4bfda0d776 is 50, key is test_row_1/A:col10/1732778547932/Put/seqid=0 2024-11-28T07:22:29,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:29,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:29,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742144_1320 (size=9757) 2024-11-28T07:22:29,090 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0811e691addf4e28b23d5d4bfda0d776 2024-11-28T07:22:29,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fb37440bb7d34575bc3b9a6eb626e094 is 50, key is test_row_1/B:col10/1732778547932/Put/seqid=0 2024-11-28T07:22:29,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778609121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T07:22:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778609123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778609124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742145_1321 (size=9757) 2024-11-28T07:22:29,159 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fb37440bb7d34575bc3b9a6eb626e094 2024-11-28T07:22:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/be01fac6ccf742838f267f8a51b2ae7c is 50, key is test_row_1/C:col10/1732778547932/Put/seqid=0 2024-11-28T07:22:29,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778609226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778609231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778609231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742146_1322 (size=9757) 2024-11-28T07:22:29,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T07:22:29,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778609431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778609436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778609438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,648 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=185 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/be01fac6ccf742838f267f8a51b2ae7c 2024-11-28T07:22:29,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0811e691addf4e28b23d5d4bfda0d776 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776 2024-11-28T07:22:29,658 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776, entries=100, sequenceid=185, filesize=9.5 K 2024-11-28T07:22:29,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fb37440bb7d34575bc3b9a6eb626e094 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094 2024-11-28T07:22:29,678 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094, entries=100, sequenceid=185, filesize=9.5 K 2024-11-28T07:22:29,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/be01fac6ccf742838f267f8a51b2ae7c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c 2024-11-28T07:22:29,684 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c, entries=100, sequenceid=185, filesize=9.5 K 2024-11-28T07:22:29,685 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 936414ebf397eefac328f959953a4d8e in 702ms, sequenceid=185, compaction requested=true 2024-11-28T07:22:29,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:29,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:29,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-28T07:22:29,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-28T07:22:29,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-28T07:22:29,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 860 msec 2024-11-28T07:22:29,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 868 msec 2024-11-28T07:22:29,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:29,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:29,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:29,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/8ef88c191fe3461fa66bb4b8ec2b38fc is 50, key is test_row_0/A:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:29,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778609786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778609795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778609795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742147_1323 (size=14541) 2024-11-28T07:22:29,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778609901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778609903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:29,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778609903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:29,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T07:22:29,929 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-28T07:22:29,930 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-28T07:22:29,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:29,932 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:29,933 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:29,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:30,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778610035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,044 DEBUG [Thread-1285 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:30,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:30,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778610110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778610113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778610114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/8ef88c191fe3461fa66bb4b8ec2b38fc 2024-11-28T07:22:30,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:30,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e00b6763605747378c23140378f1220c is 50, key is test_row_0/B:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:30,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:30,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:30,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742148_1324 (size=12151) 2024-11-28T07:22:30,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e00b6763605747378c23140378f1220c 2024-11-28T07:22:30,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e259699a6943473faade20f182fa29d6 is 50, key is test_row_0/C:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:30,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742149_1325 (size=12151) 2024-11-28T07:22:30,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:30,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778610417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778610419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778610424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:30,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:30,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:30,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:30,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e259699a6943473faade20f182fa29d6 2024-11-28T07:22:30,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/8ef88c191fe3461fa66bb4b8ec2b38fc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc 2024-11-28T07:22:30,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc, entries=200, sequenceid=209, filesize=14.2 K 2024-11-28T07:22:30,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e00b6763605747378c23140378f1220c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c 2024-11-28T07:22:30,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T07:22:30,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/e259699a6943473faade20f182fa29d6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6 2024-11-28T07:22:30,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T07:22:30,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 936414ebf397eefac328f959953a4d8e in 1029ms, sequenceid=209, compaction requested=true 2024-11-28T07:22:30,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:30,781 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:30,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-28T07:22:30,782 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:30,783 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:30,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:30,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:30,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:30,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:30,783 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48942 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:30,783 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:30,783 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:30,785 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/71dce6f8a7dd4a7bac403035fe5a2f4f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=47.8 K 2024-11-28T07:22:30,786 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:30,786 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:30,786 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,786 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/186c9762df804f8ca6125d8cad8a3d98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=45.5 K 2024-11-28T07:22:30,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71dce6f8a7dd4a7bac403035fe5a2f4f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:30,787 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 186c9762df804f8ca6125d8cad8a3d98, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:30,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0474204bf8f3474c8fbccef3b846a834, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, 
earliestPutTs=1732778547697 2024-11-28T07:22:30,787 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 20ee29c561b4466e88c8ced9fbf3a950, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732778547697 2024-11-28T07:22:30,787 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fb37440bb7d34575bc3b9a6eb626e094, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732778547925 2024-11-28T07:22:30,788 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0811e691addf4e28b23d5d4bfda0d776, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732778547925 2024-11-28T07:22:30,788 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e00b6763605747378c23140378f1220c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549121 2024-11-28T07:22:30,789 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ef88c191fe3461fa66bb4b8ec2b38fc, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549114 2024-11-28T07:22:30,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#274 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:30,809 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e12e3d09f25149a0969de2fd84a2af75 is 50, key is test_row_0/B:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:30,821 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#275 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:30,822 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/03ffa8b8fec24834ba7863654318332b is 50, key is test_row_0/A:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:30,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742150_1326 (size=12629) 2024-11-28T07:22:30,862 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:30,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T07:22:30,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:30,864 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:30,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:30,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e12e3d09f25149a0969de2fd84a2af75 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e12e3d09f25149a0969de2fd84a2af75 2024-11-28T07:22:30,874 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into e12e3d09f25149a0969de2fd84a2af75(size=12.3 K), 
total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:30,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:30,874 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=12, startTime=1732778550783; duration=0sec 2024-11-28T07:22:30,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:30,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:30,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:30,876 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46552 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:30,876 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:30,876 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:30,876 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/c6b4fc16ff744659815ef95ecaf16216, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=45.5 K 2024-11-28T07:22:30,877 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c6b4fc16ff744659815ef95ecaf16216, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=146, earliestPutTs=1732778546989 2024-11-28T07:22:30,877 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b4405a20657c4b96a76780fd80935a44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732778547697 2024-11-28T07:22:30,878 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting be01fac6ccf742838f267f8a51b2ae7c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=185, earliestPutTs=1732778547925 2024-11-28T07:22:30,878 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e259699a6943473faade20f182fa29d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549121 2024-11-28T07:22:30,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742151_1327 (size=12629) 2024-11-28T07:22:30,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/2f75befefcbf4e8c815ff2b188c0b3af is 50, key is test_row_0/A:col10/1732778549784/Put/seqid=0 2024-11-28T07:22:30,898 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/03ffa8b8fec24834ba7863654318332b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/03ffa8b8fec24834ba7863654318332b 2024-11-28T07:22:30,906 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 03ffa8b8fec24834ba7863654318332b(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:30,906 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:30,906 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=12, startTime=1732778550781; duration=0sec 2024-11-28T07:22:30,906 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:30,906 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:30,916 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:30,917 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/22507e3d8196459ab58e767f4dbe5be4 is 50, key is test_row_0/C:col10/1732778549121/Put/seqid=0 2024-11-28T07:22:30,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:30,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:30,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742152_1328 (size=12151) 2024-11-28T07:22:30,969 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/2f75befefcbf4e8c815ff2b188c0b3af 2024-11-28T07:22:30,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742153_1329 (size=12629) 2024-11-28T07:22:30,992 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/22507e3d8196459ab58e767f4dbe5be4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/22507e3d8196459ab58e767f4dbe5be4 2024-11-28T07:22:31,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/13262c1f99e84e6e805c6dfc7ba49d85 is 50, key is test_row_0/B:col10/1732778549784/Put/seqid=0 2024-11-28T07:22:31,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:31,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742154_1330 (size=12151) 2024-11-28T07:22:31,039 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 22507e3d8196459ab58e767f4dbe5be4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:31,039 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:31,039 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=12, startTime=1732778550783; duration=0sec 2024-11-28T07:22:31,039 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:31,039 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:31,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778611041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778611041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778611045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778611147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778611152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778611153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778611349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778611357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778611358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,436 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/13262c1f99e84e6e805c6dfc7ba49d85 2024-11-28T07:22:31,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/740fe2b632774004b2281ee54fc1cce8 is 50, key is test_row_0/C:col10/1732778549784/Put/seqid=0 2024-11-28T07:22:31,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742155_1331 (size=12151) 2024-11-28T07:22:31,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778611656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778611662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778611663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:31,910 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/740fe2b632774004b2281ee54fc1cce8 2024-11-28T07:22:31,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/2f75befefcbf4e8c815ff2b188c0b3af as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af 2024-11-28T07:22:31,920 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af, entries=150, sequenceid=221, filesize=11.9 K 2024-11-28T07:22:31,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/13262c1f99e84e6e805c6dfc7ba49d85 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85 2024-11-28T07:22:31,925 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85, entries=150, sequenceid=221, filesize=11.9 K 2024-11-28T07:22:31,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/740fe2b632774004b2281ee54fc1cce8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8 2024-11-28T07:22:31,941 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8, entries=150, sequenceid=221, filesize=11.9 K 2024-11-28T07:22:31,942 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 936414ebf397eefac328f959953a4d8e in 1079ms, sequenceid=221, compaction requested=false 2024-11-28T07:22:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:31,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-28T07:22:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-28T07:22:31,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-28T07:22:31,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0110 sec 2024-11-28T07:22:31,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.0150 sec 2024-11-28T07:22:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T07:22:32,037 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-28T07:22:32,039 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-28T07:22:32,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:32,040 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:32,041 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:32,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:32,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:32,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:32,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f7dab651e27c4457979bcbe92f2b3999 is 50, key is test_row_0/A:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:32,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:32,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,194 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:32,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778612200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778612201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778612208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778612209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742156_1332 (size=14541) 2024-11-28T07:22:32,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778612314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778612314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778612325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778612325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:32,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:32,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,352 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:32,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,505 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:32,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778612525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778612526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778612532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778612532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f7dab651e27c4457979bcbe92f2b3999 2024-11-28T07:22:32,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/a662eceb0a5644709dafdc1f6d8d1e38 is 50, key is test_row_0/B:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:32,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:32,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:32,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742157_1333 (size=12151) 2024-11-28T07:22:32,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/a662eceb0a5644709dafdc1f6d8d1e38 2024-11-28T07:22:32,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/cc0d68685efb4fdbb11cbd07374dea60 is 50, key is test_row_0/C:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:32,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742158_1334 (size=12151) 2024-11-28T07:22:32,818 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:32,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778612831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778612833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778612839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:32,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778612839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:32,972 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:32,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:32,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:32,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:32,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:32,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:33,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:33,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:33,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:33,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:33,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:33,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:33,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:33,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:33,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:33,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/cc0d68685efb4fdbb11cbd07374dea60 2024-11-28T07:22:33,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/f7dab651e27c4457979bcbe92f2b3999 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999 2024-11-28T07:22:33,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999, entries=200, sequenceid=249, filesize=14.2 K 2024-11-28T07:22:33,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/a662eceb0a5644709dafdc1f6d8d1e38 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38 2024-11-28T07:22:33,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38, entries=150, sequenceid=249, filesize=11.9 K 2024-11-28T07:22:33,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/cc0d68685efb4fdbb11cbd07374dea60 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60 2024-11-28T07:22:33,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60, entries=150, sequenceid=249, filesize=11.9 K 2024-11-28T07:22:33,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 936414ebf397eefac328f959953a4d8e in 1042ms, sequenceid=249, compaction requested=true 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:33,209 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:33,209 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:33,211 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:33,211 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:33,211 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:33,211 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:33,211 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:33,211 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/03ffa8b8fec24834ba7863654318332b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=38.4 K 2024-11-28T07:22:33,211 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:33,211 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e12e3d09f25149a0969de2fd84a2af75, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=36.1 K 2024-11-28T07:22:33,212 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03ffa8b8fec24834ba7863654318332b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549121 2024-11-28T07:22:33,212 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e12e3d09f25149a0969de2fd84a2af75, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549121 2024-11-28T07:22:33,212 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f75befefcbf4e8c815ff2b188c0b3af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732778549775 2024-11-28T07:22:33,213 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 13262c1f99e84e6e805c6dfc7ba49d85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732778549775 2024-11-28T07:22:33,213 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f7dab651e27c4457979bcbe92f2b3999, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:33,213 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a662eceb0a5644709dafdc1f6d8d1e38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:33,245 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:33,246 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/ba0d0f1a2862485285e373ac012ab655 is 50, key is test_row_0/A:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:33,257 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#284 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:33,258 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/20abf06d6cc44f72bfc4a0cbf31a4b2e is 50, key is test_row_0/B:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:33,285 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:33,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T07:22:33,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:33,286 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:22:33,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:33,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742159_1335 (size=12731) 2024-11-28T07:22:33,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742160_1336 (size=12731) 2024-11-28T07:22:33,339 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/20abf06d6cc44f72bfc4a0cbf31a4b2e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20abf06d6cc44f72bfc4a0cbf31a4b2e 2024-11-28T07:22:33,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 is 50, key is test_row_0/A:col10/1732778552177/Put/seqid=0 2024-11-28T07:22:33,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:33,347 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 20abf06d6cc44f72bfc4a0cbf31a4b2e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:33,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:33,347 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=13, startTime=1732778553209; duration=0sec 2024-11-28T07:22:33,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:33,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:33,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:33,350 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:33,350 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:33,350 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:33,350 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/22507e3d8196459ab58e767f4dbe5be4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=36.1 K 2024-11-28T07:22:33,350 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 22507e3d8196459ab58e767f4dbe5be4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778549121 2024-11-28T07:22:33,351 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 740fe2b632774004b2281ee54fc1cce8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732778549775 2024-11-28T07:22:33,351 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cc0d68685efb4fdbb11cbd07374dea60, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:33,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 
is added to blk_1073742161_1337 (size=12251) 2024-11-28T07:22:33,370 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 2024-11-28T07:22:33,373 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#286 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:33,374 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/5701339619cc4dd1ab1a85e7ea053c9a is 50, key is test_row_0/C:col10/1732778550982/Put/seqid=0 2024-11-28T07:22:33,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/1c3045b6b0ff42ca80b1ff48264200b1 is 50, key is test_row_0/B:col10/1732778552177/Put/seqid=0 2024-11-28T07:22:33,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778613395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742162_1338 (size=12731) 2024-11-28T07:22:33,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778613405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,419 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/5701339619cc4dd1ab1a85e7ea053c9a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/5701339619cc4dd1ab1a85e7ea053c9a 2024-11-28T07:22:33,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778613407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778613414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,426 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 5701339619cc4dd1ab1a85e7ea053c9a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
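The repeated RegionTooBusyException entries above come from the client write path that the stack traces themselves show (HTable.put -> RSRpcServices.mutate -> HRegion.put -> HRegion.checkResources). Below is a minimal sketch of such a put, assuming the standard HBase 2.x client API and borrowing the table, row, family and qualifier names that appear in this log (TestAcidGuarantees, test_row_0, A, col10); it is illustrative only, not the AcidGuaranteesTestTool writer itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressurePutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of mutation the handlers above are rejecting:
      // row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region's memstore is over its blocking limit (512.0 K here),
      // the server throws RegionTooBusyException and the client library
      // retries the call transparently.
      table.put(put);
    }
  }
}

The retries happen inside RpcRetryingCallerImpl rather than surfacing to the caller, which matches the "Call exception, tries=7, retries=16" entry that appears later in this log.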
2024-11-28T07:22:33,426 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:33,426 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=13, startTime=1732778553209; duration=0sec 2024-11-28T07:22:33,426 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:33,426 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:33,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742163_1339 (size=12251) 2024-11-28T07:22:33,447 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/1c3045b6b0ff42ca80b1ff48264200b1 2024-11-28T07:22:33,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1d702aaabe9b49c197559b57c202a2af is 50, key is test_row_0/C:col10/1732778552177/Put/seqid=0 2024-11-28T07:22:33,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778613508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778613517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778613526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778613526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742164_1340 (size=12251) 2024-11-28T07:22:33,531 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1d702aaabe9b49c197559b57c202a2af 2024-11-28T07:22:33,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 2024-11-28T07:22:33,545 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2, entries=150, sequenceid=261, filesize=12.0 K 2024-11-28T07:22:33,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/1c3045b6b0ff42ca80b1ff48264200b1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1 2024-11-28T07:22:33,551 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1, entries=150, sequenceid=261, filesize=12.0 K 2024-11-28T07:22:33,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1d702aaabe9b49c197559b57c202a2af as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af 2024-11-28T07:22:33,556 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af, entries=150, sequenceid=261, filesize=12.0 K 2024-11-28T07:22:33,557 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 936414ebf397eefac328f959953a4d8e in 271ms, sequenceid=261, compaction requested=false 2024-11-28T07:22:33,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:33,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:33,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-28T07:22:33,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-28T07:22:33,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-28T07:22:33,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5190 sec 2024-11-28T07:22:33,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.5230 sec 2024-11-28T07:22:33,712 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/ba0d0f1a2862485285e373ac012ab655 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/ba0d0f1a2862485285e373ac012ab655 2024-11-28T07:22:33,725 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into ba0d0f1a2862485285e373ac012ab655(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
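The "Over memstore limit=512.0 K" figure in the exceptions is the region's blocking memstore size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; writes are rejected with RegionTooBusyException until flushes like the ones above and below drain the memstore back under that limit. The sketch below is a hedged configuration example: the 128 KB flush size and multiplier of 4 are one combination that yields 512 K, and this log does not state which values the test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush threshold per region (assumed 128 KB here purely for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes block once the memstore reaches flush.size * multiplier:
    // 128 KB * 4 = 512 K, the limit reported in the exceptions above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
  }
}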
2024-11-28T07:22:33,726 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:33,726 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=13, startTime=1732778553209; duration=0sec 2024-11-28T07:22:33,726 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:33,726 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:33,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:33,726 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:33,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:33,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778613747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778613748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778613749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778613751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/50a210b129a3438ea04a4b3d1f1e2f95 is 50, key is test_row_0/A:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:33,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742165_1341 (size=12301) 2024-11-28T07:22:33,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/50a210b129a3438ea04a4b3d1f1e2f95 2024-11-28T07:22:33,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/ac39fc3b751c4d1d95f123bd879a0a1a is 50, key is test_row_0/B:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:33,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778613854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778613853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778613854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:33,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778613859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:33,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742166_1342 (size=12301) 2024-11-28T07:22:34,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778614061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778614062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778614062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778614065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49630 deadline: 1732778614080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,085 DEBUG [Thread-1285 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8235 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:22:34,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T07:22:34,156 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-28T07:22:34,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:34,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-28T07:22:34,160 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:34,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T07:22:34,161 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:34,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:34,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/ac39fc3b751c4d1d95f123bd879a0a1a 2024-11-28T07:22:34,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/bede5668736c4850b47a1f8fb400ced1 is 50, key is test_row_0/C:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T07:22:34,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:34,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-28T07:22:34,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:34,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742167_1343 (size=12301) 2024-11-28T07:22:34,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/bede5668736c4850b47a1f8fb400ced1 2024-11-28T07:22:34,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/50a210b129a3438ea04a4b3d1f1e2f95 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95 2024-11-28T07:22:34,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95, entries=150, sequenceid=290, filesize=12.0 K 2024-11-28T07:22:34,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/ac39fc3b751c4d1d95f123bd879a0a1a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a 2024-11-28T07:22:34,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a, entries=150, sequenceid=290, filesize=12.0 K 2024-11-28T07:22:34,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/bede5668736c4850b47a1f8fb400ced1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1 2024-11-28T07:22:34,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778614367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778614369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1, entries=150, sequenceid=290, filesize=12.0 K 2024-11-28T07:22:34,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 936414ebf397eefac328f959953a4d8e in 648ms, sequenceid=290, compaction requested=true 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:34,374 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:34,374 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:34,375 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:34,376 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:34,376 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,376 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20abf06d6cc44f72bfc4a0cbf31a4b2e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=36.4 K 2024-11-28T07:22:34,376 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:34,376 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20abf06d6cc44f72bfc4a0cbf31a4b2e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:34,376 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:34,376 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:34,376 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/ba0d0f1a2862485285e373ac012ab655, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=36.4 K 2024-11-28T07:22:34,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c3045b6b0ff42ca80b1ff48264200b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732778552177 2024-11-28T07:22:34,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac39fc3b751c4d1d95f123bd879a0a1a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:34,377 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ba0d0f1a2862485285e373ac012ab655, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:34,377 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9eae15e48cbf4f5f9e4d2b93e9db2ea2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732778552177 2024-11-28T07:22:34,378 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 50a210b129a3438ea04a4b3d1f1e2f95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:34,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:34,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,396 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
936414ebf397eefac328f959953a4d8e#A#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:34,397 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/504e9c97889944e09814d187f0cf03df is 50, key is test_row_0/A:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:34,411 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#293 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:34,412 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/b14508abf7364a67af06a16008a963f9 is 50, key is test_row_0/B:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:34,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6e33f417d75845a0a98fc58a86b6af28 is 50, key is test_row_0/A:col10/1732778554381/Put/seqid=0 2024-11-28T07:22:34,467 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:34,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742168_1344 (size=12983) 2024-11-28T07:22:34,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-28T07:22:34,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:34,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,485 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/504e9c97889944e09814d187f0cf03df as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/504e9c97889944e09814d187f0cf03df 2024-11-28T07:22:34,491 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 504e9c97889944e09814d187f0cf03df(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:34,491 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:34,491 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=13, startTime=1732778554374; duration=0sec 2024-11-28T07:22:34,491 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:34,491 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:34,491 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:34,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T07:22:34,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:34,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:34,494 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:34,494 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/5701339619cc4dd1ab1a85e7ea053c9a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=36.4 K 2024-11-28T07:22:34,495 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5701339619cc4dd1ab1a85e7ea053c9a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732778550982 2024-11-28T07:22:34,495 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d702aaabe9b49c197559b57c202a2af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732778552177 2024-11-28T07:22:34,496 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting bede5668736c4850b47a1f8fb400ced1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:34,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742169_1345 (size=12983) 2024-11-28T07:22:34,521 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/b14508abf7364a67af06a16008a963f9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/b14508abf7364a67af06a16008a963f9 2024-11-28T07:22:34,528 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into b14508abf7364a67af06a16008a963f9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:34,528 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:34,528 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=13, startTime=1732778554374; duration=0sec 2024-11-28T07:22:34,528 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:34,528 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:34,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778614516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742170_1346 (size=14741) 2024-11-28T07:22:34,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778614526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6e33f417d75845a0a98fc58a86b6af28 2024-11-28T07:22:34,562 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#295 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:34,563 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/431aa55a35334c7a9a4c2bda9b326839 is 50, key is test_row_0/C:col10/1732778553405/Put/seqid=0 2024-11-28T07:22:34,579 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/610144bb5fd64f908d97455a3752d64f is 50, key is test_row_0/B:col10/1732778554381/Put/seqid=0 2024-11-28T07:22:34,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742171_1347 (size=12983) 2024-11-28T07:22:34,626 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/431aa55a35334c7a9a4c2bda9b326839 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/431aa55a35334c7a9a4c2bda9b326839 2024-11-28T07:22:34,633 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 431aa55a35334c7a9a4c2bda9b326839(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:34,633 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:34,633 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=13, startTime=1732778554374; duration=0sec 2024-11-28T07:22:34,633 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:34,633 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:34,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778614630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,634 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:34,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-28T07:22:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:34,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:34,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:34,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742172_1348 (size=12301) 2024-11-28T07:22:34,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778614636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/610144bb5fd64f908d97455a3752d64f 2024-11-28T07:22:34,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/d047001203e945fb8b02b70c8d59b345 is 50, key is test_row_0/C:col10/1732778554381/Put/seqid=0 2024-11-28T07:22:34,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742173_1349 (size=12301) 2024-11-28T07:22:34,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/d047001203e945fb8b02b70c8d59b345 
2024-11-28T07:22:34,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6e33f417d75845a0a98fc58a86b6af28 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28 2024-11-28T07:22:34,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28, entries=200, sequenceid=301, filesize=14.4 K 2024-11-28T07:22:34,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/610144bb5fd64f908d97455a3752d64f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f 2024-11-28T07:22:34,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f, entries=150, sequenceid=301, filesize=12.0 K 2024-11-28T07:22:34,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/d047001203e945fb8b02b70c8d59b345 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345 2024-11-28T07:22:34,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345, entries=150, sequenceid=301, filesize=12.0 K 2024-11-28T07:22:34,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 936414ebf397eefac328f959953a4d8e in 355ms, sequenceid=301, compaction requested=false 2024-11-28T07:22:34,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:34,793 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T07:22:34,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:34,797 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:34,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:34,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0572904d8a9e4c10800f2a2d449448bc is 50, key is test_row_0/A:col10/1732778554501/Put/seqid=0 2024-11-28T07:22:34,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:34,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:34,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742174_1350 (size=12301) 2024-11-28T07:22:34,874 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0572904d8a9e4c10800f2a2d449448bc 2024-11-28T07:22:34,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/99de4a3337064d8b8b3b30d20539b664 is 50, key is test_row_0/B:col10/1732778554501/Put/seqid=0 2024-11-28T07:22:34,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778614889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778614889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778614890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:34,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778614891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:34,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742175_1351 (size=12301) 2024-11-28T07:22:34,926 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/99de4a3337064d8b8b3b30d20539b664 2024-11-28T07:22:34,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/84288d9b7e774245a25b557d0208740d is 50, key is test_row_0/C:col10/1732778554501/Put/seqid=0 2024-11-28T07:22:34,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742176_1352 (size=12301) 2024-11-28T07:22:35,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778614997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,002 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/84288d9b7e774245a25b557d0208740d 2024-11-28T07:22:35,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778614998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778614999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778614999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/0572904d8a9e4c10800f2a2d449448bc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc 2024-11-28T07:22:35,012 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc, entries=150, sequenceid=329, filesize=12.0 K 2024-11-28T07:22:35,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/99de4a3337064d8b8b3b30d20539b664 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664 2024-11-28T07:22:35,018 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664, entries=150, sequenceid=329, filesize=12.0 K 2024-11-28T07:22:35,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/84288d9b7e774245a25b557d0208740d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d 2024-11-28T07:22:35,026 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d, entries=150, sequenceid=329, filesize=12.0 K 2024-11-28T07:22:35,027 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 936414ebf397eefac328f959953a4d8e in 230ms, sequenceid=329, compaction requested=true 2024-11-28T07:22:35,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:35,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-28T07:22:35,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-28T07:22:35,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-28T07:22:35,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 868 msec 2024-11-28T07:22:35,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 875 msec 2024-11-28T07:22:35,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:35,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/10bc9b8cf2084d91b2644735289f831b is 50, key is test_row_0/A:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:35,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742177_1353 (size=12301) 2024-11-28T07:22:35,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778615276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778615283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778615284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778615285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T07:22:35,295 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-28T07:22:35,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:35,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-28T07:22:35,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:35,299 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:35,300 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:35,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778615386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778615389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778615391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778615391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:35,452 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:35,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:35,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:35,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
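The warnings above show the two sides of write pressure in this part of the test: HRegion.checkResources rejects incoming Mutate calls with RegionTooBusyException once the region's memstore passes its blocking limit (logged as 512.0 K), while the pid=92 FlushRegionProcedure fails with "Unable to complete flush" because the region is already flushing, so the master re-dispatches it. As a rough illustration only, the Java sketch below shows the pieces involved: the memstore sizing properties whose product yields a blocking limit like the one in the log, the client-side retry setting that governs how long rejected puts are retried before the exception reaches the caller, and an explicit Admin.flush of TestAcidGuarantees like the request the jenkins client issued. The property values here are illustrative assumptions, not the configuration this test actually ran with.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushUnderLoadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Per-region blocking limit is flush.size * block.multiplier; a 512 KB
    // limit like the one in the log could come from 128 KB * 4. These exact
    // values are assumptions for illustration, not the test's real settings.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // RegionTooBusyException is retryable: the client backs off and retries
    // up to this many times before surfacing the error to the caller.
    conf.setInt("hbase.client.retries.number", 15);

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table);
         Admin admin = conn.getAdmin()) {
      // A write like the Mutate calls rejected above; the client absorbs
      // RegionTooBusyException internally unless its retries are exhausted.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      t.put(put);

      // Equivalent of the client-requested table flush that the master turns
      // into FlushTableProcedure / FlushRegionProcedure subprocedures.
      admin.flush(table);
    }
  }
}

With settings along these lines, the put path and the flush path interact the way the log records it: writers are pushed back with RegionTooBusyException until MemStoreFlusher and the flush procedures drain the memstore below the blocking limit, after which the retried mutations go through.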
2024-11-28T07:22:35,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778615590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778615596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778615597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778615599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:35,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:35,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:35,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:35,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/10bc9b8cf2084d91b2644735289f831b 2024-11-28T07:22:35,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/982b890f9823412e873f419cde6d4e64 is 50, key is test_row_0/B:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:35,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742178_1354 (size=12301) 2024-11-28T07:22:35,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/982b890f9823412e873f419cde6d4e64 2024-11-28T07:22:35,758 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:35,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b502948cabfd463e8229c0b9fe1d4e5c is 50, key is test_row_0/C:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:35,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742179_1355 (size=12301) 2024-11-28T07:22:35,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b502948cabfd463e8229c0b9fe1d4e5c 2024-11-28T07:22:35,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/10bc9b8cf2084d91b2644735289f831b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b 2024-11-28T07:22:35,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b, entries=150, sequenceid=343, filesize=12.0 K 2024-11-28T07:22:35,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/982b890f9823412e873f419cde6d4e64 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64 2024-11-28T07:22:35,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64, entries=150, sequenceid=343, filesize=12.0 K 2024-11-28T07:22:35,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/b502948cabfd463e8229c0b9fe1d4e5c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c 2024-11-28T07:22:35,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c, entries=150, sequenceid=343, filesize=12.0 K 2024-11-28T07:22:35,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 936414ebf397eefac328f959953a4d8e in 645ms, sequenceid=343, compaction requested=true 2024-11-28T07:22:35,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:35,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:35,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:35,860 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:35,860 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:35,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:35,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:35,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:35,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:35,862 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:35,862 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:35,862 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:35,862 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:35,862 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,862 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,863 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/504e9c97889944e09814d187f0cf03df, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=51.1 K 2024-11-28T07:22:35,863 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/b14508abf7364a67af06a16008a963f9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=48.7 K 2024-11-28T07:22:35,863 DEBUG 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 504e9c97889944e09814d187f0cf03df, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:35,863 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b14508abf7364a67af06a16008a963f9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:35,863 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e33f417d75845a0a98fc58a86b6af28, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732778553740 2024-11-28T07:22:35,864 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 610144bb5fd64f908d97455a3752d64f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732778553747 2024-11-28T07:22:35,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0572904d8a9e4c10800f2a2d449448bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732778554497 2024-11-28T07:22:35,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10bc9b8cf2084d91b2644735289f831b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:35,865 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 99de4a3337064d8b8b3b30d20539b664, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732778554497 2024-11-28T07:22:35,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 982b890f9823412e873f419cde6d4e64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:35,878 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#304 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:35,879 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/e5bcd7d353374fba81be5285b02cb645 is 50, key is test_row_0/A:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:35,887 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#305 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:35,887 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/a6b63ecead7b4a1084f59101de93dd2d is 50, key is test_row_0/B:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:35,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:35,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:35,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:35,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:35,913 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:35,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:35,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:35,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:35,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778615928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778615929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778615933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:35,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778615934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:35,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf18fa92aaa24a85a76a680f6e5f666e is 50, key is test_row_0/A:col10/1732778555898/Put/seqid=0 2024-11-28T07:22:35,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742180_1356 (size=13119) 2024-11-28T07:22:35,959 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/e5bcd7d353374fba81be5285b02cb645 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e5bcd7d353374fba81be5285b02cb645 2024-11-28T07:22:35,965 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into e5bcd7d353374fba81be5285b02cb645(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
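The entries in this stretch show the region server applying write backpressure (RegionTooBusyException: Over memstore limit=512.0 K, raised from HRegion.checkResources while a flush of region 936414ebf397eefac328f959953a4d8e is still in progress), the MemStoreFlusher committing new HFiles for stores A, B and C, and the ExploringCompactionPolicy selecting all four eligible files per store for a minor compaction. As a hedged illustration only, and not part of the test output, the sketch below shows how the same flush and compaction operations can be requested through HBase's public Admin API; the table name matches the test table above, but the connection setup and the polling loop are assumptions, and the blocking threshold seen in the log is governed by the standard hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier settings rather than anything in this snippet.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        // Assumption: a client-side hbase-site.xml on the classpath points at the cluster under test.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a memstore flush, the same operation the master-driven FlushRegionCallable
          // is attempting in the log entries above.
          admin.flush(table);
          // Request a compaction; the server-side policy decides which store files to pick,
          // analogous to the ExploringCompactionPolicy selection recorded above.
          admin.compact(table);
          // Poll until the region server reports no compaction in progress.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);
          }
        }
      }
    }

The repeated "Unable to complete flush ... as already flushing" errors for pid=92 in the surrounding entries are the remote flush procedure being rejected while the region's own flush is still running; the Admin.flush call above would simply exercise that same path, so the sketch is illustrative rather than a reproduction of the test's behaviour.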
2024-11-28T07:22:35,965 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:35,965 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=12, startTime=1732778555860; duration=0sec 2024-11-28T07:22:35,965 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:35,965 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:35,965 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:35,968 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:35,968 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:35,968 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:35,968 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/431aa55a35334c7a9a4c2bda9b326839, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=48.7 K 2024-11-28T07:22:35,968 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 431aa55a35334c7a9a4c2bda9b326839, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1732778553399 2024-11-28T07:22:35,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742181_1357 (size=13119) 2024-11-28T07:22:35,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d047001203e945fb8b02b70c8d59b345, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732778553747 2024-11-28T07:22:35,975 
DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84288d9b7e774245a25b557d0208740d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732778554497 2024-11-28T07:22:35,976 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b502948cabfd463e8229c0b9fe1d4e5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:35,981 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/a6b63ecead7b4a1084f59101de93dd2d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a6b63ecead7b4a1084f59101de93dd2d 2024-11-28T07:22:35,988 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into a6b63ecead7b4a1084f59101de93dd2d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:35,988 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:35,988 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=12, startTime=1732778555860; duration=0sec 2024-11-28T07:22:35,988 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:35,989 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:36,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742182_1358 (size=14741) 2024-11-28T07:22:36,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf18fa92aaa24a85a76a680f6e5f666e 2024-11-28T07:22:36,020 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:36,020 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/93ab9c1c1f664ee381cc4869d06aa629 is 50, key is test_row_0/C:col10/1732778555213/Put/seqid=0 2024-11-28T07:22:36,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778616035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d542edef09cb43e58affb03fac775d95 is 50, key is test_row_0/B:col10/1732778555898/Put/seqid=0 2024-11-28T07:22:36,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778616037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778616042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778616042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,067 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:36,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:36,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:36,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742183_1359 (size=13119) 2024-11-28T07:22:36,115 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/93ab9c1c1f664ee381cc4869d06aa629 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9c1c1f664ee381cc4869d06aa629 2024-11-28T07:22:36,128 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 93ab9c1c1f664ee381cc4869d06aa629(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:36,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:36,129 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=12, startTime=1732778555861; duration=0sec 2024-11-28T07:22:36,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:36,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:36,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742184_1360 (size=12301) 2024-11-28T07:22:36,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d542edef09cb43e58affb03fac775d95 2024-11-28T07:22:36,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/30e5bac54b8f4c81917bfecb11538328 is 50, key is test_row_0/C:col10/1732778555898/Put/seqid=0 2024-11-28T07:22:36,188 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742185_1361 (size=12301) 2024-11-28T07:22:36,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:36,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:36,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:36,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778616243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778616246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778616249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778616256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,374 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:36,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:36,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:36,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:36,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:36,529 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:36,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:36,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:36,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:36,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778616552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778616553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778616553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:36,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778616559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:36,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/30e5bac54b8f4c81917bfecb11538328 2024-11-28T07:22:36,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/cf18fa92aaa24a85a76a680f6e5f666e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e 2024-11-28T07:22:36,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e, entries=200, sequenceid=367, filesize=14.4 K 2024-11-28T07:22:36,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d542edef09cb43e58affb03fac775d95 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95 2024-11-28T07:22:36,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95, entries=150, sequenceid=367, filesize=12.0 K 2024-11-28T07:22:36,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/30e5bac54b8f4c81917bfecb11538328 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328 2024-11-28T07:22:36,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328, entries=150, sequenceid=367, filesize=12.0 K 2024-11-28T07:22:36,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 936414ebf397eefac328f959953a4d8e in 749ms, sequenceid=367, compaction requested=false 2024-11-28T07:22:36,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:36,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:36,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:36,684 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:36,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:36,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/bcc184a53c8f4f05b5ff6d363ccb2970 is 50, key is test_row_0/A:col10/1732778555925/Put/seqid=0 2024-11-28T07:22:36,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742186_1362 (size=12301) 2024-11-28T07:22:36,719 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/bcc184a53c8f4f05b5ff6d363ccb2970 2024-11-28T07:22:36,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/df9bba620c48460ebd93e4599f11b7de is 50, key is test_row_0/B:col10/1732778555925/Put/seqid=0 2024-11-28T07:22:36,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742187_1363 (size=12301) 2024-11-28T07:22:36,768 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/df9bba620c48460ebd93e4599f11b7de 2024-11-28T07:22:36,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/557ddddf4ba74b6a81f3e9e92c1902f0 is 50, key is test_row_0/C:col10/1732778555925/Put/seqid=0 2024-11-28T07:22:36,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742188_1364 (size=12301) 2024-11-28T07:22:36,828 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/557ddddf4ba74b6a81f3e9e92c1902f0 2024-11-28T07:22:36,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/bcc184a53c8f4f05b5ff6d363ccb2970 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970 2024-11-28T07:22:36,838 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970, entries=150, sequenceid=382, filesize=12.0 K 2024-11-28T07:22:36,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/df9bba620c48460ebd93e4599f11b7de as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de 2024-11-28T07:22:36,850 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de, entries=150, sequenceid=382, filesize=12.0 K 2024-11-28T07:22:36,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/557ddddf4ba74b6a81f3e9e92c1902f0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0 2024-11-28T07:22:36,860 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0, entries=150, sequenceid=382, filesize=12.0 K 2024-11-28T07:22:36,861 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 936414ebf397eefac328f959953a4d8e in 176ms, sequenceid=382, compaction requested=true 2024-11-28T07:22:36,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:36,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:36,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-28T07:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-28T07:22:36,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-28T07:22:36,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5640 sec 2024-11-28T07:22:36,869 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.5700 sec 2024-11-28T07:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:37,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:22:37,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:37,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:37,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:37,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/02636273d5d543e39d95053ac93c3205 is 50, key is test_row_0/A:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742189_1365 (size=14741) 2024-11-28T07:22:37,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/02636273d5d543e39d95053ac93c3205 2024-11-28T07:22:37,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fcfb3252e1f240248318da598c3d162e is 50, key is test_row_0/B:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742190_1366 
(size=12301) 2024-11-28T07:22:37,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fcfb3252e1f240248318da598c3d162e 2024-11-28T07:22:37,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/8b9a933185ea40b4a5ddd605e284233b is 50, key is test_row_0/C:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778617223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778617230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778617232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778617247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742191_1367 (size=12301) 2024-11-28T07:22:37,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/8b9a933185ea40b4a5ddd605e284233b 2024-11-28T07:22:37,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/02636273d5d543e39d95053ac93c3205 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205 2024-11-28T07:22:37,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205, entries=200, sequenceid=394, filesize=14.4 K 2024-11-28T07:22:37,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/fcfb3252e1f240248318da598c3d162e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e 2024-11-28T07:22:37,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e, entries=150, sequenceid=394, filesize=12.0 K 2024-11-28T07:22:37,297 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/8b9a933185ea40b4a5ddd605e284233b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b 2024-11-28T07:22:37,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b, entries=150, sequenceid=394, filesize=12.0 K 2024-11-28T07:22:37,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 936414ebf397eefac328f959953a4d8e in 222ms, sequenceid=394, compaction requested=true 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:37,305 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:37,305 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:37,305 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:37,307 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54902 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:37,307 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:37,307 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
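The repeated RegionTooBusyException warnings above are the region server refusing puts once the region's memstore passes its blocking limit (reported here as 512.0 K) while MemStoreFlusher.0 is still draining it; in HBase that limit is the per-region flush size multiplied by the block multiplier. The sketch below is only an illustration of how such a small limit could be configured for a test cluster; the 128 KB / 4x values are assumptions chosen merely because they multiply to the 512 K seen in the log, not the actual settings of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold; writes are rejected with
    // RegionTooBusyException once usage exceeds flush.size * block.multiplier.
    // The concrete values below are illustrative assumptions only.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```

Clients typically do not see these warnings directly: the HBase client retries such calls with backoff until the RPC deadline shown in the CallRunner entries expires.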
2024-11-28T07:22:37,307 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e5bcd7d353374fba81be5285b02cb645, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=53.6 K 2024-11-28T07:22:37,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:37,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:37,308 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,308 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a6b63ecead7b4a1084f59101de93dd2d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=48.8 K 2024-11-28T07:22:37,308 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5bcd7d353374fba81be5285b02cb645, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:37,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a6b63ecead7b4a1084f59101de93dd2d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:37,309 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf18fa92aaa24a85a76a680f6e5f666e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=367, 
earliestPutTs=1732778555277 2024-11-28T07:22:37,309 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d542edef09cb43e58affb03fac775d95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732778555277 2024-11-28T07:22:37,309 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting df9bba620c48460ebd93e4599f11b7de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732778555925 2024-11-28T07:22:37,309 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcc184a53c8f4f05b5ff6d363ccb2970, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732778555925 2024-11-28T07:22:37,310 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fcfb3252e1f240248318da598c3d162e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:37,310 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02636273d5d543e39d95053ac93c3205, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:37,328 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#316 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:37,329 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/55f4270ebc024d8db383075037f25fd4 is 50, key is test_row_0/B:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,339 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:37,339 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5ba419be26d14062b5fe7b7e32eac4c0 is 50, key is test_row_0/A:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:37,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:37,347 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:37,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742192_1368 (size=13255) 2024-11-28T07:22:37,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/06121d75125a427a88d3f2b16bb0786e is 50, key is test_row_0/A:col10/1732778557345/Put/seqid=0 2024-11-28T07:22:37,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778617360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778617364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778617361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742193_1369 (size=13255) 2024-11-28T07:22:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778617366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,379 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5ba419be26d14062b5fe7b7e32eac4c0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5ba419be26d14062b5fe7b7e32eac4c0 2024-11-28T07:22:37,384 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 5ba419be26d14062b5fe7b7e32eac4c0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
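Each completed flush above adds a fourth HFile to the A/B/C stores, which is why the compaction policy immediately selects all four eligible files for a minor compaction and the PressureAwareThroughputController reports against its 50.00 MB/second limit. A minimal client-side sketch of nudging the same machinery follows, assuming default policies; the compaction.min/compaction.max values are illustrative and not taken from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minor-compaction selection bounds (illustrative values).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the region servers to compact every store of the table; the server-side
      // selection policy still decides which files are merged, as seen in the log.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```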
2024-11-28T07:22:37,384 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:37,384 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=12, startTime=1732778557305; duration=0sec 2024-11-28T07:22:37,384 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:37,384 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:37,384 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:37,394 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:37,394 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:37,394 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,394 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9c1c1f664ee381cc4869d06aa629, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=48.8 K 2024-11-28T07:22:37,395 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93ab9c1c1f664ee381cc4869d06aa629, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732778554882 2024-11-28T07:22:37,395 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30e5bac54b8f4c81917bfecb11538328, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732778555277 2024-11-28T07:22:37,395 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 557ddddf4ba74b6a81f3e9e92c1902f0, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1732778555925 2024-11-28T07:22:37,396 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b9a933185ea40b4a5ddd605e284233b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:37,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T07:22:37,407 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-28T07:22:37,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:37,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-11-28T07:22:37,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:37,418 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:37,419 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:37,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:37,427 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:37,428 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/515812e3dba640f5b1e5da8c36ca4312 is 50, key is test_row_0/C:col10/1732778557076/Put/seqid=0 2024-11-28T07:22:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742194_1370 (size=14741) 2024-11-28T07:22:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742195_1371 (size=13255) 2024-11-28T07:22:37,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778617467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778617473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778617474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778617474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:37,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:37,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-28T07:22:37,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:37,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,572 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
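pid=93/94 above is the master-driven FlushTableProcedure dispatching a FlushRegionCallable to the region server, which fails with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still writing the previous snapshot; the master re-dispatches the callable, as the later pid=94 attempts below show. The client-side equivalent, shown only as a hedged sketch, is an Admin.flush call wrapped in a small retry loop; in practice admin.flush normally blocks on the procedure and no manual retry is required.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Defensive retry: if a concurrent flush causes the request to surface as an
      // IOException, wait briefly and try again (purely illustrative handling).
      for (int attempt = 1; attempt <= 3; attempt++) {
        try {
          admin.flush(table);
          break;
        } catch (IOException e) {
          if (attempt == 3) throw e;
          Thread.sleep(1000L * attempt);
        }
      }
    }
  }
}
```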
2024-11-28T07:22:37,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:37,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:37,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778617673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778617679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778617680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778617682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:37,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:37,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-28T07:22:37,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:37,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:37,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:37,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:37,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:37,762 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/55f4270ebc024d8db383075037f25fd4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/55f4270ebc024d8db383075037f25fd4
2024-11-28T07:22:37,770 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 55f4270ebc024d8db383075037f25fd4(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:22:37,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e:
2024-11-28T07:22:37,770 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=12, startTime=1732778557305; duration=0sec
2024-11-28T07:22:37,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:22:37,771 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B
2024-11-28T07:22:37,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/06121d75125a427a88d3f2b16bb0786e
2024-11-28T07:22:37,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/37981c8993cf4da1ae2edc536fee8883 is 50, key is test_row_0/B:col10/1732778557345/Put/seqid=0
2024-11-28T07:22:37,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/515812e3dba640f5b1e5da8c36ca4312 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/515812e3dba640f5b1e5da8c36ca4312
2024-11-28T07:22:37,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:22:37,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94
2024-11-28T07:22:37,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.
2024-11-28T07:22:37,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing
2024-11-28T07:22:37,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.
2024-11-28T07:22:37,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] handler.RSProcedureHandler(58): pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:37,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=94 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:37,894 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 515812e3dba640f5b1e5da8c36ca4312(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:37,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=94 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:37,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e:
2024-11-28T07:22:37,894 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=12, startTime=1732778557305; duration=0sec
2024-11-28T07:22:37,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:22:37,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C
2024-11-28T07:22:37,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742196_1372 (size=12301)
2024-11-28T07:22:37,898 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/37981c8993cf4da1ae2edc536fee8883
2024-11-28T07:22:37,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ac3c52d78ea94cfeab96ce5b1ed33641 is 50, key is test_row_0/C:col10/1732778557345/Put/seqid=0
2024-11-28T07:22:37,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742197_1373 (size=12301)
2024-11-28T07:22:37,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=420 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ac3c52d78ea94cfeab96ce5b1ed33641
2024-11-28T07:22:37,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/06121d75125a427a88d3f2b16bb0786e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e
2024-11-28T07:22:37,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e, entries=200, sequenceid=420, filesize=14.4 K
2024-11-28T07:22:37,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/37981c8993cf4da1ae2edc536fee8883 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883
2024-11-28T07:22:37,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883, entries=150, sequenceid=420, filesize=12.0 K
2024-11-28T07:22:37,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ac3c52d78ea94cfeab96ce5b1ed33641 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641
2024-11-28T07:22:37,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778617979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:37,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778617988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778617990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:37,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778617990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:37,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641, entries=150, sequenceid=420, filesize=12.0 K 2024-11-28T07:22:37,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 936414ebf397eefac328f959953a4d8e in 649ms, sequenceid=420, compaction requested=false 2024-11-28T07:22:37,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:38,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:38,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-11-28T07:22:38,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:38,048 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:38,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:38,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/fff1873d0f07473991d2c242e2b83833 is 50, key is test_row_0/A:col10/1732778557362/Put/seqid=0 2024-11-28T07:22:38,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742198_1374 (size=12301) 2024-11-28T07:22:38,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:38,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:38,519 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/fff1873d0f07473991d2c242e2b83833 2024-11-28T07:22:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778618530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778618532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778618533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778618534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d0615a311a5f433280ea56eae2342122 is 50, key is test_row_0/B:col10/1732778557362/Put/seqid=0 2024-11-28T07:22:38,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742199_1375 (size=12301) 2024-11-28T07:22:38,606 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d0615a311a5f433280ea56eae2342122 2024-11-28T07:22:38,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1ab52de64e434ff4a60ea71bb4c20171 is 50, key is test_row_0/C:col10/1732778557362/Put/seqid=0 2024-11-28T07:22:38,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778618636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778618638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778618641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:38,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778618641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:38,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742200_1376 (size=12301)
2024-11-28T07:22:38,684 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1ab52de64e434ff4a60ea71bb4c20171
2024-11-28T07:22:38,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/fff1873d0f07473991d2c242e2b83833 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833
2024-11-28T07:22:38,702 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833, entries=150, sequenceid=433, filesize=12.0 K
2024-11-28T07:22:38,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d0615a311a5f433280ea56eae2342122 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122
2024-11-28T07:22:38,717 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122, entries=150, sequenceid=433, filesize=12.0 K
2024-11-28T07:22:38,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/1ab52de64e434ff4a60ea71bb4c20171 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171
2024-11-28T07:22:38,729 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171, entries=150, sequenceid=433, filesize=12.0 K
2024-11-28T07:22:38,730 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 936414ebf397eefac328f959953a4d8e in 683ms, sequenceid=433, compaction requested=true
2024-11-28T07:22:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e:
2024-11-28T07:22:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.
2024-11-28T07:22:38,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94
2024-11-28T07:22:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=94
2024-11-28T07:22:38,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93
2024-11-28T07:22:38,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3130 sec
2024-11-28T07:22:38,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.3250 sec
2024-11-28T07:22:38,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-11-28T07:22:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C
2024-11-28T07:22:38,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:38,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778618862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:38,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/45bcfff57328460fb51a7b9b7937b711 is 50, key is test_row_0/A:col10/1732778558851/Put/seqid=0
2024-11-28T07:22:38,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778618863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778618867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778618868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742201_1377 (size=17181) 2024-11-28T07:22:38,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/45bcfff57328460fb51a7b9b7937b711 2024-11-28T07:22:38,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d77ced35fa1640f296e65f8dc418f968 is 50, key is test_row_0/B:col10/1732778558851/Put/seqid=0 2024-11-28T07:22:38,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778618970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778618972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778618975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778618976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:38,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742202_1378 (size=12301) 2024-11-28T07:22:39,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778619174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d77ced35fa1640f296e65f8dc418f968 2024-11-28T07:22:39,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ca2fb820cc9d4545a5402900e56d0746 is 50, key is test_row_0/C:col10/1732778558851/Put/seqid=0 2024-11-28T07:22:39,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778619483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742203_1379 (size=12301) 2024-11-28T07:22:39,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ca2fb820cc9d4545a5402900e56d0746 2024-11-28T07:22:39,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778619492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778619492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:39,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778619493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:39,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/45bcfff57328460fb51a7b9b7937b711 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711 2024-11-28T07:22:39,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T07:22:39,524 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-28T07:22:39,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711, entries=250, sequenceid=461, filesize=16.8 K 2024-11-28T07:22:39,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/d77ced35fa1640f296e65f8dc418f968 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968 2024-11-28T07:22:39,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:39,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees 2024-11-28T07:22:39,532 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:39,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968, entries=150, sequenceid=461, filesize=12.0 K 2024-11-28T07:22:39,533 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:39,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ca2fb820cc9d4545a5402900e56d0746 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746 2024-11-28T07:22:39,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:39,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-28T07:22:39,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746, entries=150, sequenceid=461, filesize=12.0 K 2024-11-28T07:22:39,552 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 936414ebf397eefac328f959953a4d8e in 700ms, sequenceid=461, compaction requested=true 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:39,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T07:22:39,553 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:39,553 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:39,555 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57478 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:39,556 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:39,556 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:39,556 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5ba419be26d14062b5fe7b7e32eac4c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=56.1 K 2024-11-28T07:22:39,556 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:39,556 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:39,556 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
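
The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore sits over its blocking size (here reported as 512.0 K, which suggests the test lowers the usual limits). The snippet below is a simplified, hypothetical sketch of that kind of check, not HBase's actual implementation; the class and method names are invented for illustration, and only the two configuration keys are real HBase settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.RegionTooBusyException;

// Hypothetical sketch of a blocking-memstore check like the one behind the
// "Over memstore limit=512.0 K" warnings above. Not HBase source code.
public final class MemStoreBlockCheckSketch {

    // Real HBase config keys; defaults shown are the stock 128 MB flush size
    // and multiplier of 4. The 512 K limit in this test implies smaller values.
    static final String FLUSH_SIZE_KEY = "hbase.hregion.memstore.flush.size";
    static final String BLOCK_MULTIPLIER_KEY = "hbase.hregion.memstore.block.multiplier";

    static void checkResources(Configuration conf, long currentMemStoreBytes,
                               String regionName, String serverName)
            throws RegionTooBusyException {
        long flushSize = conf.getLong(FLUSH_SIZE_KEY, 128L * 1024 * 1024);
        int multiplier = conf.getInt(BLOCK_MULTIPLIER_KEY, 4);
        long blockingSize = flushSize * multiplier;
        if (currentMemStoreBytes > blockingSize) {
            // Writers keep getting this until the flush catches up, as seen repeatedly above.
            throw new RegionTooBusyException("Over memstore limit=" + blockingSize
                + ", regionName=" + regionName + ", server=" + serverName);
        }
    }
}
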
2024-11-28T07:22:39,556 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/515812e3dba640f5b1e5da8c36ca4312, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=49.0 K 2024-11-28T07:22:39,557 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 515812e3dba640f5b1e5da8c36ca4312, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:39,557 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ba419be26d14062b5fe7b7e32eac4c0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:39,557 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ac3c52d78ea94cfeab96ce5b1ed33641, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732778557214 2024-11-28T07:22:39,558 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06121d75125a427a88d3f2b16bb0786e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732778557214 2024-11-28T07:22:39,558 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ab52de64e434ff4a60ea71bb4c20171, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732778557350 2024-11-28T07:22:39,558 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting fff1873d0f07473991d2c242e2b83833, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732778557350 2024-11-28T07:22:39,558 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ca2fb820cc9d4545a5402900e56d0746, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:39,558 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45bcfff57328460fb51a7b9b7937b711, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:39,580 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:39,581 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/cff72a0a3bab439fbf224ee45c4c459b is 50, key is test_row_0/C:col10/1732778558851/Put/seqid=0 2024-11-28T07:22:39,589 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#329 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:39,590 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/30ef015c20d14e29945d9517e771b061 is 50, key is test_row_0/A:col10/1732778558851/Put/seqid=0 2024-11-28T07:22:39,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742204_1380 (size=13391) 2024-11-28T07:22:39,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-28T07:22:39,639 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/cff72a0a3bab439fbf224ee45c4c459b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cff72a0a3bab439fbf224ee45c4c459b 2024-11-28T07:22:39,645 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into cff72a0a3bab439fbf224ee45c4c459b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
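
The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines above refer to the exploring compaction policy, which only accepts a candidate set of store files when no single file dwarfs the rest of the set. Below is a minimal, illustrative sketch of that size-ratio test; the helper name filesInRatio, the hard-coded 1.2 default of hbase.hstore.compaction.ratio, and the sample sizes (roughly matching the A-store selection of 57478 bytes above) are assumptions for illustration, not the policy's actual code.

import java.util.List;

// Illustrative-only sketch of the size-ratio test used when exploring
// candidate sets of store files for a minor compaction. Not HBase source.
final class CompactionRatioSketch {

    // Assumed default of hbase.hstore.compaction.ratio.
    static final double COMPACTION_RATIO = 1.2;

    // A candidate set is "in ratio" if every file is no larger than
    // ratio * (total size of the other files in the set).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly matching the four A-store files selected above (~12.9 K, 14.4 K, 12.0 K, 16.8 K).
        List<Long> sizes = List.of(13209L, 14745L, 12301L, 17181L);
        System.out.println("in ratio: " + filesInRatio(sizes, COMPACTION_RATIO));
    }
}
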
2024-11-28T07:22:39,646 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:39,646 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=12, startTime=1732778559552; duration=0sec 2024-11-28T07:22:39,646 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:39,646 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:39,646 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:39,648 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:39,648 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:39,648 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:39,648 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/55f4270ebc024d8db383075037f25fd4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=49.0 K 2024-11-28T07:22:39,649 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 55f4270ebc024d8db383075037f25fd4, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1732778557074 2024-11-28T07:22:39,649 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 37981c8993cf4da1ae2edc536fee8883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=420, earliestPutTs=1732778557214 2024-11-28T07:22:39,650 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d0615a311a5f433280ea56eae2342122, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=433, earliestPutTs=1732778557350 2024-11-28T07:22:39,651 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d77ced35fa1640f296e65f8dc418f968, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:39,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742205_1381 (size=13391) 2024-11-28T07:22:39,682 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:39,683 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/708a0b71f2d54470bf2e64cbf47b7b14 is 50, key is test_row_0/B:col10/1732778558851/Put/seqid=0 2024-11-28T07:22:39,686 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:39,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-11-28T07:22:39,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
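
The flush procedures above (procId 93 completed, then pid=95 stored as a FlushTableProcedure and polled with "Checking to see if procedure is done pid=95") are driven from the client side through the Admin API. The sketch below shows the minimal way such a flush is requested; the connection setup is assumed boilerplate, and Admin.flush(TableName) is the real entry point, which in this test appears to wait until the master-side procedure finishes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of requesting a table flush like the pid=95 procedure above.
// Connection settings are assumed; the flush call returns once the
// FlushTableProcedure completes (the "procId ... completed" log line).
public final class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
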
2024-11-28T07:22:39,687 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-28T07:22:39,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:39,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:39,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:39,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:39,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:39,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:39,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/a257d2ccc116461186b17c800a2c354c is 50, key is test_row_0/A:col10/1732778558866/Put/seqid=0 2024-11-28T07:22:39,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742206_1382 (size=13391) 2024-11-28T07:22:39,754 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/708a0b71f2d54470bf2e64cbf47b7b14 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/708a0b71f2d54470bf2e64cbf47b7b14 2024-11-28T07:22:39,761 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into 708a0b71f2d54470bf2e64cbf47b7b14(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
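
As the write load resumes below, the handlers again reject mutations with RegionTooBusyException. On the client side these rejections are normally absorbed by the built-in retry loop; the sketch below shows one hedged way a writer might tolerate that back-pressure by allowing more retries with a longer pause. The configuration keys and classes are real HBase client API, but the chosen values and the table, family, and row names (taken from this test) are purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that rides out RegionTooBusyException back-pressure by
// letting the HBase client retry with a longer pause. Values are illustrative.
public final class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // More retries and a longer pause give the memstore flush time to catch up.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 500);  // milliseconds between retries

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put);  // retried internally if the region reports it is too busy
        }
    }
}
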
2024-11-28T07:22:39,761 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:39,761 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=12, startTime=1732778559552; duration=0sec 2024-11-28T07:22:39,761 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:39,761 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:39,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742207_1383 (size=9857) 2024-11-28T07:22:39,775 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/a257d2ccc116461186b17c800a2c354c 2024-11-28T07:22:39,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/f56514054ae747d1b22baebda2cfb2e9 is 50, key is test_row_0/B:col10/1732778558866/Put/seqid=0 2024-11-28T07:22:39,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-28T07:22:39,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742208_1384 (size=9857) 2024-11-28T07:22:39,844 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/f56514054ae747d1b22baebda2cfb2e9 2024-11-28T07:22:39,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba18e83602d54ee79b6e8e62a11a2dc4 is 50, key is test_row_0/C:col10/1732778558866/Put/seqid=0 2024-11-28T07:22:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742209_1385 (size=9857) 2024-11-28T07:22:40,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
as already flushing 2024-11-28T07:22:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:40,068 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/30ef015c20d14e29945d9517e771b061 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/30ef015c20d14e29945d9517e771b061 2024-11-28T07:22:40,074 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 30ef015c20d14e29945d9517e771b061(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:40,074 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:40,074 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=12, startTime=1732778559552; duration=0sec 2024-11-28T07:22:40,074 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:40,074 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:40,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778620076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778620078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778620081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778620081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-28T07:22:40,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778620188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778620190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778620190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778620198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,317 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba18e83602d54ee79b6e8e62a11a2dc4 2024-11-28T07:22:40,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/a257d2ccc116461186b17c800a2c354c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c 2024-11-28T07:22:40,329 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c, entries=100, sequenceid=471, filesize=9.6 K 2024-11-28T07:22:40,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/f56514054ae747d1b22baebda2cfb2e9 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9 2024-11-28T07:22:40,340 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9, entries=100, sequenceid=471, filesize=9.6 K 2024-11-28T07:22:40,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/ba18e83602d54ee79b6e8e62a11a2dc4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4 2024-11-28T07:22:40,350 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4, entries=100, sequenceid=471, filesize=9.6 K 2024-11-28T07:22:40,351 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 936414ebf397eefac328f959953a4d8e in 664ms, sequenceid=471, compaction requested=false 2024-11-28T07:22:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
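The entries above complete one memstore flush of region 936414ebf397eefac328f959953a4d8e: temporary HFiles written under .tmp/ are committed into the A, B and C store directories while concurrent Mutate RPCs keep failing with RegionTooBusyException, because the region's memstore is over its 512.0 K blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the defaults are 128 MB and 4, so this test run is evidently configured with a much smaller flush size). The Java sketch below is illustrative commentary, not part of the test output: it assumes a plain HBase client writing to this table and shows a hypothetical backoff loop around Table.put for the case where the exception still surfaces after the client's built-in retries.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackoffSketch {
      public static void main(String[] args) throws Exception {
        // Server-side (hbase-site.xml) properties behind the 512.0 K blocking limit seen above:
        //   hbase.hregion.memstore.flush.size        -> flush trigger per region
        //   hbase.hregion.memstore.block.multiplier  -> blocking limit = flush size * multiplier
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // The HBase client already retries RegionTooBusyException internally; this explicit
          // loop only sketches what that backoff amounts to conceptually.
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break;
            } catch (IOException e) {
              // Typically a retries-exhausted wrapper whose cause chain contains the
              // RegionTooBusyException logged above: wait for the flush to drain the
              // memstore, then try again with a longer pause.
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }

The same throttling can also be tuned from the client side via hbase.client.retries.number and hbase.client.pause rather than an explicit loop.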
2024-11-28T07:22:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=96
2024-11-28T07:22:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=96
2024-11-28T07:22:40,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95
2024-11-28T07:22:40,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 820 msec
2024-11-28T07:22:40,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees in 828 msec
2024-11-28T07:22:40,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 936414ebf397eefac328f959953a4d8e
2024-11-28T07:22:40,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C
2024-11-28T07:22:40,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778620408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778620410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778620414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778620414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/b26628334c904d56a32723502287bc1f is 50, key is test_row_0/A:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:40,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742210_1386 (size=14741) 2024-11-28T07:22:40,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778620515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778620515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778620519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778620519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-28T07:22:40,637 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-11-28T07:22:40,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:40,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees 2024-11-28T07:22:40,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:40,640 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:40,641 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:40,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:40,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778620724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778620725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778620725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:40,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778620726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:40,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:40,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:40,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:40,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:40,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:40,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:40,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
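The pid=97/pid=98 exchange above is the master-side view of an admin-requested table flush: the client call creates a FlushTableProcedure, which spawns a FlushRegionProcedure per region, and because the region is still busy with the memstore flush started at 07:22:40,405 the callable fails with the IOException shown and the master re-dispatches it until that flush finishes. As a hedged sketch (not part of the test itself), the client call that produces the earlier 'Operation: FLUSH, Table Name: default:TestAcidGuarantees' line would look roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master (pid=95/97 in this log) and waits
          // for its per-region FlushRegionProcedure subprocedures to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Read this way, the repeated 'Unable to complete flush' / 'Remote procedure failed, pid=98' pairs that follow are the expected retry loop of that subprocedure rather than independent failures.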
2024-11-28T07:22:40,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:40,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:40,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/b26628334c904d56a32723502287bc1f 2024-11-28T07:22:40,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4944073576794dfb9b44e1e3b49676bc is 50, key is test_row_0/B:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:40,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:40,952 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:40,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:40,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742211_1387 (size=12301) 2024-11-28T07:22:40,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:40,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:40,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:40,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:40,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:40,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778621027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778621027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778621029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778621030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,106 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:41,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:41,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:41,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:41,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4944073576794dfb9b44e1e3b49676bc 2024-11-28T07:22:41,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/6362d7de81b348bcb763c0ddd30f10a1 is 50, key is test_row_0/C:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:41,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742212_1388 (size=12301) 2024-11-28T07:22:41,414 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:41,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:41,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49618 deadline: 1732778621533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49590 deadline: 1732778621534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49648 deadline: 1732778621538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:41,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49616 deadline: 1732778621540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:41,567 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,720 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. as already flushing 2024-11-28T07:22:41,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,721 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] handler.RSProcedureHandler(58): pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=98 java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:41,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=98 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:41,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:41,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/6362d7de81b348bcb763c0ddd30f10a1 2024-11-28T07:22:41,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/b26628334c904d56a32723502287bc1f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f 2024-11-28T07:22:41,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f, entries=200, sequenceid=504, filesize=14.4 K 2024-11-28T07:22:41,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/4944073576794dfb9b44e1e3b49676bc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc 2024-11-28T07:22:41,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc, entries=150, sequenceid=504, filesize=12.0 K 2024-11-28T07:22:41,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/6362d7de81b348bcb763c0ddd30f10a1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1 2024-11-28T07:22:41,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1, entries=150, sequenceid=504, filesize=12.0 K 2024-11-28T07:22:41,869 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=20.13 KB/20610 for 936414ebf397eefac328f959953a4d8e in 1464ms, sequenceid=504, compaction requested=true 2024-11-28T07:22:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-28T07:22:41,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:41,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:41,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:41,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37989 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:41,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/A is initiating minor compaction (all files) 2024-11-28T07:22:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 936414ebf397eefac328f959953a4d8e:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:41,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:41,870 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/A in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
2024-11-28T07:22:41,870 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/30ef015c20d14e29945d9517e771b061, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=37.1 K 2024-11-28T07:22:41,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:41,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/B is initiating minor compaction (all files) 2024-11-28T07:22:41,871 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/B in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,871 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/708a0b71f2d54470bf2e64cbf47b7b14, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=34.7 K 2024-11-28T07:22:41,871 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30ef015c20d14e29945d9517e771b061, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:41,871 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 708a0b71f2d54470bf2e64cbf47b7b14, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:41,872 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a257d2ccc116461186b17c800a2c354c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732778558865 2024-11-28T07:22:41,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f56514054ae747d1b22baebda2cfb2e9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732778558865 2024-11-28T07:22:41,872 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b26628334c904d56a32723502287bc1f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732778560076 2024-11-28T07:22:41,872 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4944073576794dfb9b44e1e3b49676bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732778560076 2024-11-28T07:22:41,873 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:41,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-11-28T07:22:41,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,874 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:41,894 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#B#compaction#337 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:41,894 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e47ab34b930f4e468de14a0847c01de1 is 50, key is test_row_0/B:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:41,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5bb248d38663415ebc55b49b78c58722 is 50, key is test_row_0/A:col10/1732778560410/Put/seqid=0 2024-11-28T07:22:41,907 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#A#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:41,907 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5c20f31a16184f2f80318d04db7cac1b is 50, key is test_row_0/A:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:41,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742213_1389 (size=13493) 2024-11-28T07:22:41,949 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/e47ab34b930f4e468de14a0847c01de1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e47ab34b930f4e468de14a0847c01de1 2024-11-28T07:22:41,955 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/B of 936414ebf397eefac328f959953a4d8e into e47ab34b930f4e468de14a0847c01de1(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:41,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:41,955 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/B, priority=13, startTime=1732778561869; duration=0sec 2024-11-28T07:22:41,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:41,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:B 2024-11-28T07:22:41,955 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:41,957 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:41,957 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 936414ebf397eefac328f959953a4d8e/C is initiating minor compaction (all files) 2024-11-28T07:22:41,957 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 936414ebf397eefac328f959953a4d8e/C in TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:41,957 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cff72a0a3bab439fbf224ee45c4c459b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp, totalSize=34.7 K 2024-11-28T07:22:41,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742214_1390 (size=12301) 2024-11-28T07:22:41,958 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cff72a0a3bab439fbf224ee45c4c459b, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=461, earliestPutTs=1732778558516 2024-11-28T07:22:41,958 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ba18e83602d54ee79b6e8e62a11a2dc4, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=471, earliestPutTs=1732778558865 2024-11-28T07:22:41,959 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=510 
(bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5bb248d38663415ebc55b49b78c58722 2024-11-28T07:22:41,960 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 6362d7de81b348bcb763c0ddd30f10a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=504, earliestPutTs=1732778560076 2024-11-28T07:22:41,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742215_1391 (size=13493) 2024-11-28T07:22:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/5269ca56313c448e8ef759fb97ae673f is 50, key is test_row_0/B:col10/1732778560410/Put/seqid=0 2024-11-28T07:22:41,977 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5c20f31a16184f2f80318d04db7cac1b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5c20f31a16184f2f80318d04db7cac1b 2024-11-28T07:22:41,984 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/A of 936414ebf397eefac328f959953a4d8e into 5c20f31a16184f2f80318d04db7cac1b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:41,984 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:41,984 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/A, priority=13, startTime=1732778561869; duration=0sec 2024-11-28T07:22:41,984 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:41,984 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:A 2024-11-28T07:22:41,985 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 936414ebf397eefac328f959953a4d8e#C#compaction#341 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:41,986 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/0a67e43fb4c544ecbff9ddc25ed1c71e is 50, key is test_row_0/C:col10/1732778560405/Put/seqid=0 2024-11-28T07:22:42,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742217_1393 (size=13493) 2024-11-28T07:22:42,037 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/0a67e43fb4c544ecbff9ddc25ed1c71e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0a67e43fb4c544ecbff9ddc25ed1c71e 2024-11-28T07:22:42,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742216_1392 (size=12301) 2024-11-28T07:22:42,062 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/5269ca56313c448e8ef759fb97ae673f 2024-11-28T07:22:42,070 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 936414ebf397eefac328f959953a4d8e/C of 936414ebf397eefac328f959953a4d8e into 0a67e43fb4c544ecbff9ddc25ed1c71e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
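The RS_FLUSH_OPERATIONS entries in this stretch belong to FlushRegionProcedure pid=98, the subprocedure of the table-level flush (pid=97) that the test client requested: each of the three column families is written out at sequenceid=510 and then committed from the .tmp directory into its store. A client triggers this kind of flush through the Admin API; a minimal sketch under the same assumptions as the previous snippet (HBase 2.x client, table name from the log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Flush all memstores of the table; in this log that request is executed on the master
          // as a FlushTableProcedure with one FlushRegionProcedure per region.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
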
2024-11-28T07:22:42,070 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:42,070 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e., storeName=936414ebf397eefac328f959953a4d8e/C, priority=13, startTime=1732778561870; duration=0sec 2024-11-28T07:22:42,070 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:42,070 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 936414ebf397eefac328f959953a4d8e:C 2024-11-28T07:22:42,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/536e86788ba841a0bf00799058c20a8d is 50, key is test_row_0/C:col10/1732778560410/Put/seqid=0 2024-11-28T07:22:42,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742218_1394 (size=12301) 2024-11-28T07:22:42,385 DEBUG [Thread-1294 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:56318 2024-11-28T07:22:42,385 DEBUG [Thread-1298 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:56318 2024-11-28T07:22:42,385 DEBUG [Thread-1298 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,386 DEBUG [Thread-1302 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:56318 2024-11-28T07:22:42,386 DEBUG [Thread-1302 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,386 DEBUG [Thread-1300 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:56318 2024-11-28T07:22:42,386 DEBUG [Thread-1300 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,386 DEBUG [Thread-1294 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,387 DEBUG [Thread-1296 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:56318 2024-11-28T07:22:42,387 DEBUG [Thread-1296 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,534 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/536e86788ba841a0bf00799058c20a8d 2024-11-28T07:22:42,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/5bb248d38663415ebc55b49b78c58722 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5bb248d38663415ebc55b49b78c58722 2024-11-28T07:22:42,547 DEBUG [Thread-1283 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:56318 2024-11-28T07:22:42,547 DEBUG [Thread-1283 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,549 DEBUG [Thread-1287 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:56318 2024-11-28T07:22:42,550 DEBUG [Thread-1287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,550 DEBUG [Thread-1291 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:56318 2024-11-28T07:22:42,550 DEBUG [Thread-1291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,552 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5bb248d38663415ebc55b49b78c58722, entries=150, sequenceid=510, filesize=12.0 K 2024-11-28T07:22:42,553 DEBUG [Thread-1289 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:56318 2024-11-28T07:22:42,553 DEBUG [Thread-1289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:42,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/5269ca56313c448e8ef759fb97ae673f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/5269ca56313c448e8ef759fb97ae673f 2024-11-28T07:22:42,559 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/5269ca56313c448e8ef759fb97ae673f, entries=150, sequenceid=510, filesize=12.0 K 2024-11-28T07:22:42,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/536e86788ba841a0bf00799058c20a8d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/536e86788ba841a0bf00799058c20a8d 2024-11-28T07:22:42,565 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/536e86788ba841a0bf00799058c20a8d, entries=150, sequenceid=510, filesize=12.0 K 2024-11-28T07:22:42,566 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(3040): Finished flush of 
dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=26.84 KB/27480 for 936414ebf397eefac328f959953a4d8e in 692ms, sequenceid=510, compaction requested=false 2024-11-28T07:22:42,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2538): Flush status journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:42,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:42,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=98 2024-11-28T07:22:42,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=98 2024-11-28T07:22:42,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-28T07:22:42,570 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9270 sec 2024-11-28T07:22:42,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees in 1.9310 sec 2024-11-28T07:22:42,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T07:22:42,756 INFO [Thread-1293 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-28T07:22:42,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T07:22:44,155 DEBUG [Thread-1285 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:56318 2024-11-28T07:22:44,155 DEBUG [Thread-1285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:44,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-28T07:22:44,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3333 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3320 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3170 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3350 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3400 2024-11-28T07:22:44,156 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T07:22:44,156 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T07:22:44,156 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:56318 2024-11-28T07:22:44,156 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:22:44,156 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T07:22:44,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T07:22:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:44,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:44,160 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778564160"}]},"ts":"1732778564160"} 2024-11-28T07:22:44,161 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T07:22:44,163 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T07:22:44,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:22:44,165 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, UNASSIGN}] 2024-11-28T07:22:44,165 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, UNASSIGN 2024-11-28T07:22:44,166 INFO [PEWorker-4 
{}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=936414ebf397eefac328f959953a4d8e, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:44,166 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:22:44,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:44,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:44,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:44,318 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 936414ebf397eefac328f959953a4d8e, disabling compactions & flushes 2024-11-28T07:22:44,318 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. after waiting 0 ms 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 
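The DisableTableProcedure (pid=99) above fans out into CloseTableRegionsProcedure (pid=100), TransitRegionStateProcedure (pid=101) and CloseRegionProcedure (pid=102), which is why the region is being unassigned and closed here. On the client side the whole chain is started by a single Admin call; a minimal sketch, under the same assumptions as the earlier snippets:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTable {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(table); // blocks until the DisableTableProcedure has finished
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }
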
2024-11-28T07:22:44,318 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(2837): Flushing 936414ebf397eefac328f959953a4d8e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:22:44,318 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=A 2024-11-28T07:22:44,319 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:44,319 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=B 2024-11-28T07:22:44,319 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:44,319 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 936414ebf397eefac328f959953a4d8e, store=C 2024-11-28T07:22:44,319 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:44,322 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6003e30048d4432c9fe07daec263e67c is 50, key is test_row_0/A:col10/1732778562545/Put/seqid=0 2024-11-28T07:22:44,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742219_1395 (size=12301) 2024-11-28T07:22:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:44,726 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6003e30048d4432c9fe07daec263e67c 2024-11-28T07:22:44,732 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/72c53a3627b24784911d96441729c453 is 50, key is test_row_0/B:col10/1732778562545/Put/seqid=0 2024-11-28T07:22:44,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742220_1396 (size=12301) 2024-11-28T07:22:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:45,136 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 
{event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/72c53a3627b24784911d96441729c453 2024-11-28T07:22:45,142 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/0510410023e74f9fbc94ce65a6092581 is 50, key is test_row_0/C:col10/1732778562545/Put/seqid=0 2024-11-28T07:22:45,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742221_1397 (size=12301) 2024-11-28T07:22:45,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:45,545 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=521 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/0510410023e74f9fbc94ce65a6092581 2024-11-28T07:22:45,549 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/A/6003e30048d4432c9fe07daec263e67c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6003e30048d4432c9fe07daec263e67c 2024-11-28T07:22:45,552 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6003e30048d4432c9fe07daec263e67c, entries=150, sequenceid=521, filesize=12.0 K 2024-11-28T07:22:45,553 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/B/72c53a3627b24784911d96441729c453 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/72c53a3627b24784911d96441729c453 2024-11-28T07:22:45,556 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/72c53a3627b24784911d96441729c453, entries=150, sequenceid=521, filesize=12.0 K 2024-11-28T07:22:45,556 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/.tmp/C/0510410023e74f9fbc94ce65a6092581 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0510410023e74f9fbc94ce65a6092581 2024-11-28T07:22:45,559 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0510410023e74f9fbc94ce65a6092581, entries=150, sequenceid=521, filesize=12.0 K 2024-11-28T07:22:45,560 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 936414ebf397eefac328f959953a4d8e in 1242ms, sequenceid=521, compaction requested=true 2024-11-28T07:22:45,560 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/95af2acc507a4469813f13eb933c6ab8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a27b48fc339e464daa68c9ed18310d6a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/71dce6f8a7dd4a7bac403035fe5a2f4f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/03ffa8b8fec24834ba7863654318332b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/ba0d0f1a2862485285e373ac012ab655, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/504e9c97889944e09814d187f0cf03df, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e5bcd7d353374fba81be5285b02cb645, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5ba419be26d14062b5fe7b7e32eac4c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/30ef015c20d14e29945d9517e771b061, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f] to archive 2024-11-28T07:22:45,561 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:22:45,562 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/050fc5c35e9049aabc8d4566de9cd0b2 2024-11-28T07:22:45,563 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/df65d557a20745fe849da1ed1513e7c2 2024-11-28T07:22:45,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cd5afc7c1c3c4beba8f5e94fe2a991f4 2024-11-28T07:22:45,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/95af2acc507a4469813f13eb933c6ab8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/95af2acc507a4469813f13eb933c6ab8 2024-11-28T07:22:45,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf2d566c458548beaf6d3e06b3f161a4 2024-11-28T07:22:45,567 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/c83baabca20049ef9491898ad84c3194 2024-11-28T07:22:45,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a27b48fc339e464daa68c9ed18310d6a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a27b48fc339e464daa68c9ed18310d6a 2024-11-28T07:22:45,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f39c237af8314fc3b7b390436c64e54e 2024-11-28T07:22:45,569 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e46a4f86b9254e8db51a4c3de457a434 2024-11-28T07:22:45,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/71dce6f8a7dd4a7bac403035fe5a2f4f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/71dce6f8a7dd4a7bac403035fe5a2f4f 2024-11-28T07:22:45,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/53416b46bc5f4b60b4edec7cb0adaf99 2024-11-28T07:22:45,573 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0474204bf8f3474c8fbccef3b846a834 2024-11-28T07:22:45,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0811e691addf4e28b23d5d4bfda0d776 2024-11-28T07:22:45,575 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/8ef88c191fe3461fa66bb4b8ec2b38fc 2024-11-28T07:22:45,576 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/03ffa8b8fec24834ba7863654318332b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/03ffa8b8fec24834ba7863654318332b 2024-11-28T07:22:45,577 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/2f75befefcbf4e8c815ff2b188c0b3af 2024-11-28T07:22:45,578 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/f7dab651e27c4457979bcbe92f2b3999 2024-11-28T07:22:45,579 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/ba0d0f1a2862485285e373ac012ab655 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/ba0d0f1a2862485285e373ac012ab655 2024-11-28T07:22:45,580 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/9eae15e48cbf4f5f9e4d2b93e9db2ea2 2024-11-28T07:22:45,581 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/504e9c97889944e09814d187f0cf03df to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/504e9c97889944e09814d187f0cf03df 2024-11-28T07:22:45,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/50a210b129a3438ea04a4b3d1f1e2f95 2024-11-28T07:22:45,582 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6e33f417d75845a0a98fc58a86b6af28 2024-11-28T07:22:45,583 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/0572904d8a9e4c10800f2a2d449448bc 2024-11-28T07:22:45,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e5bcd7d353374fba81be5285b02cb645 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/e5bcd7d353374fba81be5285b02cb645 2024-11-28T07:22:45,585 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/10bc9b8cf2084d91b2644735289f831b 2024-11-28T07:22:45,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/cf18fa92aaa24a85a76a680f6e5f666e 2024-11-28T07:22:45,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/bcc184a53c8f4f05b5ff6d363ccb2970 2024-11-28T07:22:45,588 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/02636273d5d543e39d95053ac93c3205 2024-11-28T07:22:45,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5ba419be26d14062b5fe7b7e32eac4c0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5ba419be26d14062b5fe7b7e32eac4c0 2024-11-28T07:22:45,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/06121d75125a427a88d3f2b16bb0786e 2024-11-28T07:22:45,590 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/fff1873d0f07473991d2c242e2b83833 2024-11-28T07:22:45,591 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/45bcfff57328460fb51a7b9b7937b711 2024-11-28T07:22:45,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/30ef015c20d14e29945d9517e771b061 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/30ef015c20d14e29945d9517e771b061 2024-11-28T07:22:45,593 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/a257d2ccc116461186b17c800a2c354c 2024-11-28T07:22:45,594 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/b26628334c904d56a32723502287bc1f 2024-11-28T07:22:45,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/9bb86575e6514d0ba1ae67769b1114c0, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/46f0f9e3528a44609d384e7417d5fd71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/186c9762df804f8ca6125d8cad8a3d98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e12e3d09f25149a0969de2fd84a2af75, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20abf06d6cc44f72bfc4a0cbf31a4b2e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/b14508abf7364a67af06a16008a963f9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a6b63ecead7b4a1084f59101de93dd2d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/55f4270ebc024d8db383075037f25fd4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/708a0b71f2d54470bf2e64cbf47b7b14, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc] to archive 2024-11-28T07:22:45,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
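The HFileArchiver entries above and below record each compacted store file being moved out of the region's data directory into the mirrored layout under archive/data. As a rough illustration of that path mapping only, here is a minimal sketch assuming a plain HDFS rename; the class and method names are hypothetical and this is not the actual HFileArchiver implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  // Move one store file from <root>/data/<ns>/<table>/<region>/<cf>/<file>
  // to the same relative location under <root>/archive/data, as the
  // "Archived from ..., <src> to <dst>" DEBUG lines suggest.
  static Path archiveStoreFile(Configuration conf, Path rootDir, Path storeFile) throws IOException {
    FileSystem fs = storeFile.getFileSystem(conf);
    String dataPrefix = new Path(rootDir, "data").toUri().getPath();
    // e.g. "default/TestAcidGuarantees/<region>/B/<hfile>"
    String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
    Path archived = new Path(new Path(rootDir, "archive/data"), relative);
    fs.mkdirs(archived.getParent());          // ensure the archive column-family dir exists
    if (!fs.rename(storeFile, archived)) {    // a plain rename; no copy of file data
      throw new IOException("Failed to archive " + storeFile + " to " + archived);
    }
    return archived;
  }
}

Unlike the real archiver, this sketch does no collision or failure handling; it only shows why the source and destination paths in the log differ solely by the data/ vs archive/data/ prefix.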
2024-11-28T07:22:45,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/52b305f5d93547b7beca575e280b2ab7 2024-11-28T07:22:45,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0ec23887574c461eb7bed12a03f94789 2024-11-28T07:22:45,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/9bb86575e6514d0ba1ae67769b1114c0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/9bb86575e6514d0ba1ae67769b1114c0 2024-11-28T07:22:45,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/af9470750bfd4fee851764640db4b708 2024-11-28T07:22:45,601 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/8f91965c4ac3450695de47de86e7b669 2024-11-28T07:22:45,602 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/11f28f92da1a4d49845cd70c7ac65f39 2024-11-28T07:22:45,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/46f0f9e3528a44609d384e7417d5fd71 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/46f0f9e3528a44609d384e7417d5fd71 2024-11-28T07:22:45,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4bc6e7e6ec264ed4aa816b7b5569d490 2024-11-28T07:22:45,605 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/0813f6559ad84ce088c661073541e348 2024-11-28T07:22:45,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/186c9762df804f8ca6125d8cad8a3d98 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/186c9762df804f8ca6125d8cad8a3d98 2024-11-28T07:22:45,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/479a027456f0420c86b32d66fc4c22dd 2024-11-28T07:22:45,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20ee29c561b4466e88c8ced9fbf3a950 2024-11-28T07:22:45,608 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fb37440bb7d34575bc3b9a6eb626e094 2024-11-28T07:22:45,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e12e3d09f25149a0969de2fd84a2af75 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e12e3d09f25149a0969de2fd84a2af75 2024-11-28T07:22:45,610 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e00b6763605747378c23140378f1220c 2024-11-28T07:22:45,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/13262c1f99e84e6e805c6dfc7ba49d85 2024-11-28T07:22:45,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20abf06d6cc44f72bfc4a0cbf31a4b2e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/20abf06d6cc44f72bfc4a0cbf31a4b2e 2024-11-28T07:22:45,612 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a662eceb0a5644709dafdc1f6d8d1e38 2024-11-28T07:22:45,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/1c3045b6b0ff42ca80b1ff48264200b1 2024-11-28T07:22:45,614 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/b14508abf7364a67af06a16008a963f9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/b14508abf7364a67af06a16008a963f9 2024-11-28T07:22:45,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/ac39fc3b751c4d1d95f123bd879a0a1a 2024-11-28T07:22:45,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/610144bb5fd64f908d97455a3752d64f 2024-11-28T07:22:45,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/99de4a3337064d8b8b3b30d20539b664 2024-11-28T07:22:45,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a6b63ecead7b4a1084f59101de93dd2d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/a6b63ecead7b4a1084f59101de93dd2d 2024-11-28T07:22:45,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/982b890f9823412e873f419cde6d4e64 2024-11-28T07:22:45,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d542edef09cb43e58affb03fac775d95 2024-11-28T07:22:45,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/df9bba620c48460ebd93e4599f11b7de 2024-11-28T07:22:45,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/55f4270ebc024d8db383075037f25fd4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/55f4270ebc024d8db383075037f25fd4 2024-11-28T07:22:45,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/fcfb3252e1f240248318da598c3d162e 2024-11-28T07:22:45,624 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/37981c8993cf4da1ae2edc536fee8883 2024-11-28T07:22:45,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d0615a311a5f433280ea56eae2342122 2024-11-28T07:22:45,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/708a0b71f2d54470bf2e64cbf47b7b14 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/708a0b71f2d54470bf2e64cbf47b7b14 2024-11-28T07:22:45,627 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/d77ced35fa1640f296e65f8dc418f968 2024-11-28T07:22:45,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/f56514054ae747d1b22baebda2cfb2e9 2024-11-28T07:22:45,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/4944073576794dfb9b44e1e3b49676bc 2024-11-28T07:22:45,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/97521e74d92d41e9991506add61cf2dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/88a4f09c063d4794b11d6e36f962bb9d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/c6b4fc16ff744659815ef95ecaf16216, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/22507e3d8196459ab58e767f4dbe5be4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/5701339619cc4dd1ab1a85e7ea053c9a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/431aa55a35334c7a9a4c2bda9b326839, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9c1c1f664ee381cc4869d06aa629, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/515812e3dba640f5b1e5da8c36ca4312, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cff72a0a3bab439fbf224ee45c4c459b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1] to archive 2024-11-28T07:22:45,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:22:45,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e55c52651dfd445fa329ff5d97d9c70f 2024-11-28T07:22:45,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1c9062eb85ad4400a1fb6b32f4a28a69 2024-11-28T07:22:45,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/97521e74d92d41e9991506add61cf2dd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/97521e74d92d41e9991506add61cf2dd 2024-11-28T07:22:45,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1a7ed76a8c89442ea7fbde891539827d 2024-11-28T07:22:45,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9e894d7d477e9bce6a057138a80b 2024-11-28T07:22:45,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/4fe310e09be54459871cc58a3a53bc66 2024-11-28T07:22:45,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/88a4f09c063d4794b11d6e36f962bb9d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/88a4f09c063d4794b11d6e36f962bb9d 2024-11-28T07:22:45,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/acbe2b248f2e43bf820d212d45af7a6b 2024-11-28T07:22:45,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba3bea3044f8460db7a3b26a002be71b 2024-11-28T07:22:45,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/c6b4fc16ff744659815ef95ecaf16216 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/c6b4fc16ff744659815ef95ecaf16216 2024-11-28T07:22:45,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/904195fdba864488bee5a401b7e4a331 2024-11-28T07:22:45,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b4405a20657c4b96a76780fd80935a44 2024-11-28T07:22:45,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/be01fac6ccf742838f267f8a51b2ae7c 2024-11-28T07:22:45,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/22507e3d8196459ab58e767f4dbe5be4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/22507e3d8196459ab58e767f4dbe5be4 2024-11-28T07:22:45,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/e259699a6943473faade20f182fa29d6 2024-11-28T07:22:45,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/740fe2b632774004b2281ee54fc1cce8 2024-11-28T07:22:45,647 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/5701339619cc4dd1ab1a85e7ea053c9a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/5701339619cc4dd1ab1a85e7ea053c9a 2024-11-28T07:22:45,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cc0d68685efb4fdbb11cbd07374dea60 2024-11-28T07:22:45,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1d702aaabe9b49c197559b57c202a2af 2024-11-28T07:22:45,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/431aa55a35334c7a9a4c2bda9b326839 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/431aa55a35334c7a9a4c2bda9b326839 2024-11-28T07:22:45,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/bede5668736c4850b47a1f8fb400ced1 2024-11-28T07:22:45,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/d047001203e945fb8b02b70c8d59b345 2024-11-28T07:22:45,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/84288d9b7e774245a25b557d0208740d 2024-11-28T07:22:45,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9c1c1f664ee381cc4869d06aa629 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/93ab9c1c1f664ee381cc4869d06aa629 2024-11-28T07:22:45,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/b502948cabfd463e8229c0b9fe1d4e5c 2024-11-28T07:22:45,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/30e5bac54b8f4c81917bfecb11538328 2024-11-28T07:22:45,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/557ddddf4ba74b6a81f3e9e92c1902f0 2024-11-28T07:22:45,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/515812e3dba640f5b1e5da8c36ca4312 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/515812e3dba640f5b1e5da8c36ca4312 2024-11-28T07:22:45,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/8b9a933185ea40b4a5ddd605e284233b 2024-11-28T07:22:45,661 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ac3c52d78ea94cfeab96ce5b1ed33641 2024-11-28T07:22:45,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/1ab52de64e434ff4a60ea71bb4c20171 2024-11-28T07:22:45,663 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cff72a0a3bab439fbf224ee45c4c459b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/cff72a0a3bab439fbf224ee45c4c459b 2024-11-28T07:22:45,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ca2fb820cc9d4545a5402900e56d0746 2024-11-28T07:22:45,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/ba18e83602d54ee79b6e8e62a11a2dc4 2024-11-28T07:22:45,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/6362d7de81b348bcb763c0ddd30f10a1 2024-11-28T07:22:45,670 DEBUG 
[RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/recovered.edits/524.seqid, newMaxSeqId=524, maxSeqId=1 2024-11-28T07:22:45,671 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e. 2024-11-28T07:22:45,671 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 936414ebf397eefac328f959953a4d8e: 2024-11-28T07:22:45,672 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:45,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=936414ebf397eefac328f959953a4d8e, regionState=CLOSED 2024-11-28T07:22:45,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-28T07:22:45,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 936414ebf397eefac328f959953a4d8e, server=592d8b721726,33143,1732778474488 in 1.5080 sec 2024-11-28T07:22:45,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-28T07:22:45,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=936414ebf397eefac328f959953a4d8e, UNASSIGN in 1.5110 sec 2024-11-28T07:22:45,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-28T07:22:45,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5130 sec 2024-11-28T07:22:45,678 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778565678"}]},"ts":"1732778565678"} 2024-11-28T07:22:45,679 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:22:45,681 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:22:45,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5250 sec 2024-11-28T07:22:46,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-11-28T07:22:46,263 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-11-28T07:22:46,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:22:46,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,264 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=103, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-11-28T07:22:46,265 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=103, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,266 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:46,268 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/recovered.edits] 2024-11-28T07:22:46,270 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5bb248d38663415ebc55b49b78c58722 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5bb248d38663415ebc55b49b78c58722 2024-11-28T07:22:46,271 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5c20f31a16184f2f80318d04db7cac1b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/5c20f31a16184f2f80318d04db7cac1b 2024-11-28T07:22:46,272 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6003e30048d4432c9fe07daec263e67c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/A/6003e30048d4432c9fe07daec263e67c 2024-11-28T07:22:46,274 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/5269ca56313c448e8ef759fb97ae673f to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/5269ca56313c448e8ef759fb97ae673f 2024-11-28T07:22:46,275 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/72c53a3627b24784911d96441729c453 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/72c53a3627b24784911d96441729c453 2024-11-28T07:22:46,276 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e47ab34b930f4e468de14a0847c01de1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/B/e47ab34b930f4e468de14a0847c01de1 2024-11-28T07:22:46,277 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0510410023e74f9fbc94ce65a6092581 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0510410023e74f9fbc94ce65a6092581 2024-11-28T07:22:46,278 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0a67e43fb4c544ecbff9ddc25ed1c71e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/0a67e43fb4c544ecbff9ddc25ed1c71e 2024-11-28T07:22:46,279 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/536e86788ba841a0bf00799058c20a8d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/C/536e86788ba841a0bf00799058c20a8d 2024-11-28T07:22:46,281 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/recovered.edits/524.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e/recovered.edits/524.seqid 2024-11-28T07:22:46,282 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/936414ebf397eefac328f959953a4d8e 2024-11-28T07:22:46,282 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:22:46,283 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=103, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,286 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:22:46,287 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:22:46,288 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=103, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,288 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T07:22:46,288 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778566288"}]},"ts":"9223372036854775807"} 2024-11-28T07:22:46,290 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:22:46,290 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 936414ebf397eefac328f959953a4d8e, NAME => 'TestAcidGuarantees,,1732778540225.936414ebf397eefac328f959953a4d8e.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:22:46,290 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T07:22:46,290 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778566290"}]},"ts":"9223372036854775807"} 2024-11-28T07:22:46,291 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:22:46,293 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=103, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 30 msec 2024-11-28T07:22:46,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-11-28T07:22:46,366 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 103 completed 2024-11-28T07:22:46,376 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=241 (was 238) - Thread LEAK? -, OpenFileDescriptor=457 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=595 (was 438) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4545 (was 5149) 2024-11-28T07:22:46,386 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=241, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=595, ProcessCount=11, AvailableMemoryMB=4545 2024-11-28T07:22:46,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
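The DELETE operation that just completed (procId 103) is driven from the client by a plain Admin call; everything above, from DELETE_TABLE_PRE_OPERATION through the HFileArchiver moves into archive/ and the META cleanup, happens master-side. A minimal sketch of that client call, assuming the standard HBase 2.x Admin API and that the table had already been disabled (HBase requires a table to be disabled before it can be deleted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // DeleteTableProcedure requires the table to be disabled first.
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                // Blocks until the master-side DeleteTableProcedure completes:
                // pre-operation, CLEAR_FS_LAYOUT (store files moved to archive/),
                // REMOVE_FROM_META, UNASSIGN_REGIONS, POST_OPERATION.
                admin.deleteTable(table);
            }
        }
    }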
2024-11-28T07:22:46,387 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:22:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:46,389 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:22:46,389 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:46,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 104 2024-11-28T07:22:46,390 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:22:46,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:46,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742222_1398 (size=960) 2024-11-28T07:22:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:46,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:46,797 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:22:46,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742223_1399 (size=53) 2024-11-28T07:22:46,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:47,202 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:22:47,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 92fcea6fc878b4b1c7f03e0a8e3d3d00, disabling compactions & flushes 2024-11-28T07:22:47,203 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:47,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:47,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. after waiting 0 ms 2024-11-28T07:22:47,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:47,203 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
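The create request logged at 07:22:46,387 carries the full table descriptor: families A, B and C with VERSIONS => '1' and 64 KB blocks, the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', and the 131072-byte memstore flush size that triggers the TableDescriptorChecker warning. A sketch of how such a descriptor could be assembled with the HBase 2.x client builders, setting only the values visible in the log and leaving the rest at their defaults:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        // Matches the logged family schema: VERSIONS => '1', BLOCKSIZE => 65536 B (64 KB).
        static ColumnFamilyDescriptor family(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                .setBlocksize(65536)
                .build();
        }

        static void createTable(Admin admin) throws Exception {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level attribute seen in the log: BASIC in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                // 128 KB flush size -- deliberately tiny, hence the TableDescriptorChecker WARN.
                .setMemStoreFlushSize(131072L)
                .setColumnFamily(family("A"))
                .setColumnFamily(family("B"))
                .setColumnFamily(family("C"))
                .build();
            // Blocks until the master-side CreateTableProcedure reaches SUCCESS.
            admin.createTable(desc);
        }
    }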
2024-11-28T07:22:47,203 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:47,204 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:22:47,204 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778567204"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778567204"}]},"ts":"1732778567204"} 2024-11-28T07:22:47,205 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:22:47,206 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:22:47,206 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778567206"}]},"ts":"1732778567206"} 2024-11-28T07:22:47,206 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:22:47,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, ASSIGN}] 2024-11-28T07:22:47,211 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, ASSIGN 2024-11-28T07:22:47,212 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:22:47,362 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:47,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; OpenRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:47,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:47,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:47,517 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
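The StoreOpener lines below show each family opening with a CompactingMemStore (compactor=BASIC, in-memory flush threshold 2.00 MB) because of the table-level compacting-memstore attribute. The same behaviour can also be requested per column family; a sketch of that per-family form, offered only as an alternative to what this test actually configures:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
        // Per-family alternative to the table-level property used by this test: a store
        // built from this descriptor also opens with a CompactingMemStore, compactor=BASIC.
        static ColumnFamilyDescriptor basicCompactingFamily(String name) {
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build();
        }
    }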
2024-11-28T07:22:47,517 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7285): Opening region: {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:22:47,517 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,517 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:22:47,518 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7327): checking encryption for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,518 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(7330): checking classloading for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,519 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,520 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:47,521 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName A 2024-11-28T07:22:47,521 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:47,521 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:47,521 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,522 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:47,523 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName B 2024-11-28T07:22:47,523 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:47,523 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:47,523 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,524 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:47,524 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName C 2024-11-28T07:22:47,524 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:47,524 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:47,525 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:47,525 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,525 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,527 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:22:47,528 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1085): writing seq id for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:47,530 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:22:47,531 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1102): Opened 92fcea6fc878b4b1c7f03e0a8e3d3d00; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66815815, jitterRate=-0.004366770386695862}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:22:47,532 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegion(1001): Region open journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:47,532 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., pid=106, masterSystemTime=1732778567514 2024-11-28T07:22:47,534 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:47,534 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=106}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
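The repeated "Checking to see if procedure is done pid=104" lines are the client polling the master until the CreateTableProcedure finishes; the synchronous Admin.createTable call simply hides that loop behind a blocking wait. A sketch of the equivalent asynchronous form, assuming a client recent enough to have the single-argument createTableAsync overload (older 2.x releases only expose the (descriptor, splitKeys) variant):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class AsyncCreateSketch {
        static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
            // Submits the CreateTableProcedure and returns immediately; the client then polls
            // the master ("Checking to see if procedure is done pid=...") until it completes.
            Future<Void> pending = admin.createTableAsync(desc);
            // Wait for the procedure to finish, much as the synchronous createTable does.
            pending.get(2, TimeUnit.MINUTES);
        }
    }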
2024-11-28T07:22:47,534 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:47,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-11-28T07:22:47,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; OpenRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 in 172 msec 2024-11-28T07:22:47,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-28T07:22:47,538 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, ASSIGN in 327 msec 2024-11-28T07:22:47,539 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:22:47,539 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778567539"}]},"ts":"1732778567539"} 2024-11-28T07:22:47,540 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:22:47,543 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=104, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:22:47,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1560 sec 2024-11-28T07:22:48,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-28T07:22:48,494 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-28T07:22:48,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0bf5e2f0 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b82ba2a 2024-11-28T07:22:48,499 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3637e4c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:48,500 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:48,502 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:48,503 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:22:48,504 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59598, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:22:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T07:22:48,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:22:48,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T07:22:48,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742224_1400 (size=996) 2024-11-28T07:22:48,916 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T07:22:48,916 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T07:22:48,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:22:48,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, REOPEN/MOVE}] 2024-11-28T07:22:48,921 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, REOPEN/MOVE 2024-11-28T07:22:48,921 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:48,922 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:22:48,922 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=110, ppid=109, state=RUNNABLE; CloseRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:49,073 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:49,074 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(124): Close 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,074 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:22:49,074 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1681): Closing 92fcea6fc878b4b1c7f03e0a8e3d3d00, disabling compactions & flushes 2024-11-28T07:22:49,074 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,074 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,074 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. after waiting 0 ms 2024-11-28T07:22:49,074 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:49,078 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T07:22:49,078 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,078 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegion(1635): Region close journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:49,078 WARN [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] regionserver.HRegionServer(3786): Not adding moved region record: 92fcea6fc878b4b1c7f03e0a8e3d3d00 to self. 2024-11-28T07:22:49,080 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=110}] handler.UnassignRegionHandler(170): Closed 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,080 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=CLOSED 2024-11-28T07:22:49,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=110, resume processing ppid=109 2024-11-28T07:22:49,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, ppid=109, state=SUCCESS; CloseRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 in 159 msec 2024-11-28T07:22:49,082 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, REOPEN/MOVE; state=CLOSED, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=true 2024-11-28T07:22:49,233 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,234 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE; OpenRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:22:49,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:49,389 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:49,389 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7285): Opening region: {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:22:49,390 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,390 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:22:49,390 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7327): checking encryption for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,390 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(7330): checking classloading for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,391 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,392 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:49,392 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName A 2024-11-28T07:22:49,394 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:49,394 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:49,394 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,395 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:49,395 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName B 2024-11-28T07:22:49,395 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:49,395 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:49,395 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,396 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:22:49,396 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 92fcea6fc878b4b1c7f03e0a8e3d3d00 columnFamilyName C 2024-11-28T07:22:49,396 DEBUG [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:49,396 INFO [StoreOpener-92fcea6fc878b4b1c7f03e0a8e3d3d00-1 {}] regionserver.HStore(327): Store=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:22:49,397 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,397 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,398 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,399 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:22:49,400 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1085): writing seq id for 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,401 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1102): Opened 92fcea6fc878b4b1c7f03e0a8e3d3d00; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63021693, jitterRate=-0.06090359389781952}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:22:49,401 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegion(1001): Region open journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:49,402 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., pid=111, masterSystemTime=1732778569385 2024-11-28T07:22:49,403 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,403 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=111}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
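The modify request logged at 07:22:48,505 changes only family A: it becomes a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', and the ReopenTableRegionsProcedure above then closes and reopens the region so the new descriptor takes effect. A sketch of that modification using the standard client builders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
        static void enableMobOnA(Admin admin) throws Exception {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            TableDescriptor current = admin.getDescriptor(table);
            // Rewrite family 'A' as a MOB family with a 4-byte threshold, as in the logged modify.
            ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)
                .setMobThreshold(4L)
                .build();
            TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
                .modifyColumnFamily(mobA)
                .build();
            // Drives ModifyTableProcedure and the region reopen seen above.
            admin.modifyTable(modified);
        }
    }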
2024-11-28T07:22:49,404 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=OPEN, openSeqNum=5, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=109 2024-11-28T07:22:49,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=109, state=SUCCESS; OpenRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 in 170 msec 2024-11-28T07:22:49,407 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-28T07:22:49,407 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, REOPEN/MOVE in 486 msec 2024-11-28T07:22:49,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-11-28T07:22:49,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 489 msec 2024-11-28T07:22:49,410 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 904 msec 2024-11-28T07:22:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-11-28T07:22:49,412 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-11-28T07:22:49,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-11-28T07:22:49,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,423 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-11-28T07:22:49,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 
to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-11-28T07:22:49,432 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,433 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-11-28T07:22:49,438 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,439 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x319559be to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f49665c 2024-11-28T07:22:49,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2205f666, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-11-28T07:22:49,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,450 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-11-28T07:22:49,456 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,457 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-11-28T07:22:49,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,463 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-11-28T07:22:49,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:22:49,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:49,469 DEBUG [hconnection-0x1661fba3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,469 DEBUG [hconnection-0x5ad12f6c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-28T07:22:49,470 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:49,470 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,470 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:49,470 DEBUG [hconnection-0x304a246f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,471 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:49,471 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:49,472 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,474 DEBUG [hconnection-0x471ddeb6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,475 DEBUG [hconnection-0x7e6404d8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,475 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56890, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,476 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56894, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,476 DEBUG [hconnection-0x2927f88c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,477 DEBUG [hconnection-0x4ff581d6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,477 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,478 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56914, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,478 DEBUG [hconnection-0x5e502cb7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,478 DEBUG [hconnection-0x1d076514-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,479 DEBUG [hconnection-0x73bc41b3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:22:49,479 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,479 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,480 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:22:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:49,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:49,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778629496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778629496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778629497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778629499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778629500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128181005c01a3c438f9f86f5c837a1b732_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778569480/Put/seqid=0 2024-11-28T07:22:49,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742225_1401 (size=17034) 2024-11-28T07:22:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:49,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778629600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778629600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778629600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778629600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778629601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,623 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:49,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:49,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:49,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:49,776 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:49,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:49,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:49,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778629805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778629806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778629806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778629806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:49,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778629807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:49,927 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:49,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:49,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:49,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:49,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:49,929 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:49,932 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128181005c01a3c438f9f86f5c837a1b732_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128181005c01a3c438f9f86f5c837a1b732_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:49,933 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/18ee6aab1c8142bfb9d2f0a9ac488283, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:49,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/18ee6aab1c8142bfb9d2f0a9ac488283 is 175, key is test_row_0/A:col10/1732778569480/Put/seqid=0 2024-11-28T07:22:49,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742226_1402 (size=48139) 2024-11-28T07:22:50,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:50,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:50,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:50,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:50,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778630109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778630110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778630111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778630111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778630112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:50,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:50,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:50,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:50,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:50,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,339 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/18ee6aab1c8142bfb9d2f0a9ac488283 2024-11-28T07:22:50,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3fcda4e155bf4b7883c6cd43c8a959fd is 50, key is test_row_0/B:col10/1732778569480/Put/seqid=0 2024-11-28T07:22:50,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:50,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:50,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:50,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:50,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:50,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:50,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742227_1403 (size=12001) 2024-11-28T07:22:50,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3fcda4e155bf4b7883c6cd43c8a959fd 2024-11-28T07:22:50,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/847dca02e5364844818fdbf6c8cba976 is 50, key is test_row_0/C:col10/1732778569480/Put/seqid=0 2024-11-28T07:22:50,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742228_1404 (size=12001) 2024-11-28T07:22:50,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/847dca02e5364844818fdbf6c8cba976 2024-11-28T07:22:50,485 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/18ee6aab1c8142bfb9d2f0a9ac488283 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283 2024-11-28T07:22:50,493 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283, entries=250, sequenceid=17, filesize=47.0 K 2024-11-28T07:22:50,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3fcda4e155bf4b7883c6cd43c8a959fd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd 2024-11-28T07:22:50,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:22:50,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/847dca02e5364844818fdbf6c8cba976 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976 2024-11-28T07:22:50,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:22:50,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1025ms, sequenceid=17, compaction requested=false 2024-11-28T07:22:50,506 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-28T07:22:50,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:50,540 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:50,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T07:22:50,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:50,541 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:50,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:50,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:50,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284b5ad46f322e4841866571df0b5281ed_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778569493/Put/seqid=0 2024-11-28T07:22:50,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742229_1405 (size=12154) 2024-11-28T07:22:50,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:50,560 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284b5ad46f322e4841866571df0b5281ed_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284b5ad46f322e4841866571df0b5281ed_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:50,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/61d0478abd3b490893175f211f3a72f8, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:50,561 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/61d0478abd3b490893175f211f3a72f8 is 175, key is test_row_0/A:col10/1732778569493/Put/seqid=0 2024-11-28T07:22:50,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742230_1406 (size=30955) 2024-11-28T07:22:50,571 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/61d0478abd3b490893175f211f3a72f8 2024-11-28T07:22:50,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:50,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0cc65cdba1de4b4d84092bb7b41eb6e6 is 50, key is test_row_0/B:col10/1732778569493/Put/seqid=0 2024-11-28T07:22:50,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742231_1407 (size=12001) 2024-11-28T07:22:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:50,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:50,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778630622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778630623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778630626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778630627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778630627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778630728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778630729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778630732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778630734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778630735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778630933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778630935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778630935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778630936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:50,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778630941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:50,983 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0cc65cdba1de4b4d84092bb7b41eb6e6 2024-11-28T07:22:50,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e093231cbb074d05a9c0bace2071ed4c is 50, key is test_row_0/C:col10/1732778569493/Put/seqid=0 2024-11-28T07:22:51,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742232_1408 (size=12001) 2024-11-28T07:22:51,197 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:22:51,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778631238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778631240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778631241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778631241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778631246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,401 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e093231cbb074d05a9c0bace2071ed4c 2024-11-28T07:22:51,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/61d0478abd3b490893175f211f3a72f8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8 2024-11-28T07:22:51,412 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8, entries=150, sequenceid=41, filesize=30.2 K 2024-11-28T07:22:51,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0cc65cdba1de4b4d84092bb7b41eb6e6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6 2024-11-28T07:22:51,433 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T07:22:51,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e093231cbb074d05a9c0bace2071ed4c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c 2024-11-28T07:22:51,439 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T07:22:51,443 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 902ms, sequenceid=41, compaction requested=false 2024-11-28T07:22:51,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:51,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:51,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-28T07:22:51,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-28T07:22:51,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-28T07:22:51,445 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9730 sec 2024-11-28T07:22:51,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.9790 sec 2024-11-28T07:22:51,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T07:22:51,575 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-28T07:22:51,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:51,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-28T07:22:51,579 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-11-28T07:22:51,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T07:22:51,580 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:51,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:51,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T07:22:51,731 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:51,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-28T07:22:51,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:51,744 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:51,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:51,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128277925a4481e47928225971f0886b3d9_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778570621/Put/seqid=0 2024-11-28T07:22:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): 
Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:51,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:51,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742233_1409 (size=12154) 2024-11-28T07:22:51,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778631837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778631844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778631845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778631847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778631847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T07:22:51,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778631949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778631960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778631961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778631961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:51,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778631961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778632164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T07:22:52,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:52,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778632177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778632177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,193 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128277925a4481e47928225971f0886b3d9_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128277925a4481e47928225971f0886b3d9_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778632178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778632178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/078cd58f6fb644faad79887780cf4395, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:52,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/078cd58f6fb644faad79887780cf4395 is 175, key is test_row_0/A:col10/1732778570621/Put/seqid=0 2024-11-28T07:22:52,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742234_1410 (size=30955) 2024-11-28T07:22:52,236 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/078cd58f6fb644faad79887780cf4395 2024-11-28T07:22:52,246 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/96a9b08d91aa43eaa6b8c65bdf860bb2 is 50, key is test_row_0/B:col10/1732778570621/Put/seqid=0 2024-11-28T07:22:52,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742235_1411 (size=12001) 2024-11-28T07:22:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778632469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778632495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778632495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778632495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:52,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:52,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778632496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:52,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-28T07:22:52,683 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/96a9b08d91aa43eaa6b8c65bdf860bb2
2024-11-28T07:22:52,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/6961a6b043a24a58a7853094444c61cb is 50, key is test_row_0/C:col10/1732778570621/Put/seqid=0
2024-11-28T07:22:52,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742236_1412 (size=12001)
2024-11-28T07:22:52,769 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/6961a6b043a24a58a7853094444c61cb
2024-11-28T07:22:52,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/078cd58f6fb644faad79887780cf4395 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395
2024-11-28T07:22:52,779 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395, entries=150, sequenceid=53, filesize=30.2 K
2024-11-28T07:22:52,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/96a9b08d91aa43eaa6b8c65bdf860bb2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2
2024-11-28T07:22:52,784 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2, entries=150, sequenceid=53, filesize=11.7 K
2024-11-28T07:22:52,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/6961a6b043a24a58a7853094444c61cb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb
2024-11-28T07:22:52,788 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb, entries=150, sequenceid=53, filesize=11.7 K
2024-11-28T07:22:52,789 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1045ms, sequenceid=53, compaction requested=true
2024-11-28T07:22:52,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00:
2024-11-28T07:22:52,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:52,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-28T07:22:52,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-28T07:22:52,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-28T07:22:52,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2100 sec 2024-11-28T07:22:52,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.2150 sec 2024-11-28T07:22:53,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:22:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:53,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:53,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:53,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:53,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:53,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:53,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112886b77d2583774242b9778b7207cdab1b_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:53,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778633028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778633030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778633025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778633033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778633035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742237_1413 (size=14594) 2024-11-28T07:22:53,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778633148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778633149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778633149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778633150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778633149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778633363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778633366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778633366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778633366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778633366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:53,491 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:22:53,495 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112886b77d2583774242b9778b7207cdab1b_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886b77d2583774242b9778b7207cdab1b_92fcea6fc878b4b1c7f03e0a8e3d3d00
2024-11-28T07:22:53,497 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/37b20a35ef5341bcad84ac08c37e9813, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:53,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/37b20a35ef5341bcad84ac08c37e9813 is 175, key is test_row_0/A:col10/1732778571840/Put/seqid=0
2024-11-28T07:22:53,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742238_1414 (size=39549)
2024-11-28T07:22:53,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778633667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:53,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778633670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778633671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778633672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T07:22:53,684 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-28T07:22:53,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:53,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-28T07:22:53,687 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:53,687 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:53,688 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T07:22:53,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778633682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:53,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T07:22:53,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:53,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T07:22:53,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:53,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:53,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:53,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:53,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:53,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:53,960 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/37b20a35ef5341bcad84ac08c37e9813 2024-11-28T07:22:53,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/23731effc2e0478b844a537688d0974d is 50, key is test_row_0/B:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:53,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T07:22:53,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:53,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T07:22:53,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:53,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:22:53,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:53,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:53,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:53,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:54,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742239_1415 (size=12001) 2024-11-28T07:22:54,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/23731effc2e0478b844a537688d0974d 2024-11-28T07:22:54,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/eb6534af51c64002aa2b386f7229a08c is 50, key is test_row_0/C:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:54,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742240_1416 (size=12001) 2024-11-28T07:22:54,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/eb6534af51c64002aa2b386f7229a08c 2024-11-28T07:22:54,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/37b20a35ef5341bcad84ac08c37e9813 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813 2024-11-28T07:22:54,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813, entries=200, sequenceid=78, filesize=38.6 K 2024-11-28T07:22:54,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/23731effc2e0478b844a537688d0974d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d 2024-11-28T07:22:54,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:22:54,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/eb6534af51c64002aa2b386f7229a08c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c 2024-11-28T07:22:54,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:22:54,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1124ms, sequenceid=78, compaction requested=true 2024-11-28T07:22:54,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:54,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:54,125 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:54,126 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:54,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:54,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:54,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:54,126 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:54,126 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:54,127 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149598 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:54,127 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:22:54,127 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,127 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=146.1 K 2024-11-28T07:22:54,127 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,127 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813] 2024-11-28T07:22:54,128 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:54,128 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:22:54,128 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,128 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=46.9 K 2024-11-28T07:22:54,128 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 18ee6aab1c8142bfb9d2f0a9ac488283, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778569476 2024-11-28T07:22:54,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fcda4e155bf4b7883c6cd43c8a959fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778569478 2024-11-28T07:22:54,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cc65cdba1de4b4d84092bb7b41eb6e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778569493 2024-11-28T07:22:54,129 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 61d0478abd3b490893175f211f3a72f8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778569493 2024-11-28T07:22:54,129 
DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96a9b08d91aa43eaa6b8c65bdf860bb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778570620 2024-11-28T07:22:54,129 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 078cd58f6fb644faad79887780cf4395, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778570620 2024-11-28T07:22:54,129 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23731effc2e0478b844a537688d0974d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840 2024-11-28T07:22:54,130 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 37b20a35ef5341bcad84ac08c37e9813, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840 2024-11-28T07:22:54,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:54,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,149 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:22:54,149 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:54,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,166 INFO 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#359 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:54,167 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/7d15c14ebdc84fb8ab4a925be27a8136 is 50, key is test_row_0/B:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:54,173 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128f5e924b1c11a4f289147bb9e4d68f48c_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,176 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128f5e924b1c11a4f289147bb9e4d68f48c_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,176 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f5e924b1c11a4f289147bb9e4d68f48c_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:22:54,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:54,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112898116b9e27d24f79abeaab617098b560_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778573022/Put/seqid=0 2024-11-28T07:22:54,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742241_1417 (size=12139) 2024-11-28T07:22:54,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742242_1418 (size=4469) 2024-11-28T07:22:54,233 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/7d15c14ebdc84fb8ab4a925be27a8136 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7d15c14ebdc84fb8ab4a925be27a8136 2024-11-28T07:22:54,240 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 7d15c14ebdc84fb8ab4a925be27a8136(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:54,240 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:54,240 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=12, startTime=1732778574125; duration=0sec 2024-11-28T07:22:54,241 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:54,241 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:22:54,241 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:22:54,242 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:22:54,242 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:22:54,243 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,243 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=46.9 K 2024-11-28T07:22:54,243 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 847dca02e5364844818fdbf6c8cba976, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778569478 2024-11-28T07:22:54,244 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e093231cbb074d05a9c0bace2071ed4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778569493 2024-11-28T07:22:54,244 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6961a6b043a24a58a7853094444c61cb, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778570620 2024-11-28T07:22:54,245 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb6534af51c64002aa2b386f7229a08c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840 2024-11-28T07:22:54,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742243_1419 (size=12154) 2024-11-28T07:22:54,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:54,268 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112898116b9e27d24f79abeaab617098b560_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898116b9e27d24f79abeaab617098b560_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:54,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d22273e218114309b51923a0374185b7, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d22273e218114309b51923a0374185b7 is 175, key is test_row_0/A:col10/1732778573022/Put/seqid=0 2024-11-28T07:22:54,271 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#361 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:54,272 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f680b2e931544ee7bde9e73bc2a8f3eb is 50, key is test_row_0/C:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:54,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778634254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778634264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778634273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T07:22:54,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778634278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778634277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742244_1420 (size=30955) 2024-11-28T07:22:54,350 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=89, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d22273e218114309b51923a0374185b7 2024-11-28T07:22:54,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742245_1421 (size=12139) 2024-11-28T07:22:54,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778634380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778634381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/9ff7f40e45d446c6876b34d2be0556cb is 50, key is test_row_0/B:col10/1732778573022/Put/seqid=0 2024-11-28T07:22:54,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778634389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778634392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778634394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742246_1422 (size=12001) 2024-11-28T07:22:54,440 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/9ff7f40e45d446c6876b34d2be0556cb 2024-11-28T07:22:54,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/be5e60a51cd24357a7490ee5e747f419 is 50, key is test_row_0/C:col10/1732778573022/Put/seqid=0 2024-11-28T07:22:54,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742247_1423 (size=12001) 2024-11-28T07:22:54,503 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/be5e60a51cd24357a7490ee5e747f419 2024-11-28T07:22:54,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d22273e218114309b51923a0374185b7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7 2024-11-28T07:22:54,520 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7, entries=150, sequenceid=89, filesize=30.2 K 2024-11-28T07:22:54,521 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/9ff7f40e45d446c6876b34d2be0556cb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb 2024-11-28T07:22:54,525 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb, entries=150, sequenceid=89, filesize=11.7 K 2024-11-28T07:22:54,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/be5e60a51cd24357a7490ee5e747f419 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419 2024-11-28T07:22:54,537 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419, entries=150, sequenceid=89, filesize=11.7 K 2024-11-28T07:22:54,538 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 389ms, sequenceid=89, compaction requested=false 2024-11-28T07:22:54,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:54,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:54,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-28T07:22:54,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-28T07:22:54,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-28T07:22:54,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 852 msec 2024-11-28T07:22:54,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 857 msec 2024-11-28T07:22:54,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:54,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:54,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128902e023643c64a58bfc5211842234e31_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778574603/Put/seqid=0 2024-11-28T07:22:54,633 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#358 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:54,634 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/185daed9bdea4dc6906df0332ea97908 is 175, key is test_row_0/A:col10/1732778571840/Put/seqid=0 2024-11-28T07:22:54,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778634624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778634625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778634626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778634626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778634626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742248_1424 (size=14594) 2024-11-28T07:22:54,647 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:54,654 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128902e023643c64a58bfc5211842234e31_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128902e023643c64a58bfc5211842234e31_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:54,656 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/549c1e18603d4d1185007db3df4e0cd1, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:54,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/549c1e18603d4d1185007db3df4e0cd1 is 175, key is test_row_0/A:col10/1732778574603/Put/seqid=0 2024-11-28T07:22:54,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742249_1425 (size=31093) 2024-11-28T07:22:54,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742250_1426 (size=39549) 2024-11-28T07:22:54,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778634740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778634740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778634740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778634741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778634741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,786 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f680b2e931544ee7bde9e73bc2a8f3eb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f680b2e931544ee7bde9e73bc2a8f3eb 2024-11-28T07:22:54,791 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into f680b2e931544ee7bde9e73bc2a8f3eb(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:54,791 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:54,791 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=12, startTime=1732778574126; duration=0sec 2024-11-28T07:22:54,791 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:54,791 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:22:54,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T07:22:54,792 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-28T07:22:54,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:54,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-28T07:22:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T07:22:54,806 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:54,807 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:54,807 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:54,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T07:22:54,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:54,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-28T07:22:54,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:54,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:54,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:54,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:54,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:54,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:54,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778634955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778634955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778634955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778634955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:54,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:54,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778634955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:55,087 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/185daed9bdea4dc6906df0332ea97908 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908 2024-11-28T07:22:55,096 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 185daed9bdea4dc6906df0332ea97908(size=30.4 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:22:55,096 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:55,096 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=12, startTime=1732778574125; duration=0sec 2024-11-28T07:22:55,096 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:55,096 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:22:55,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T07:22:55,112 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:55,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-28T07:22:55,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:55,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:55,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:55,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:55,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119
java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:55,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=119
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:55,128 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/549c1e18603d4d1185007db3df4e0cd1
2024-11-28T07:22:55,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e3fe6557aae44c46a766d9981d5a7132 is 50, key is test_row_0/B:col10/1732778574603/Put/seqid=0
2024-11-28T07:22:55,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742251_1427 (size=12001)
2024-11-28T07:22:55,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e3fe6557aae44c46a766d9981d5a7132
2024-11-28T07:22:55,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b87fbbb61ae54c3bbedec1ba9e821ae3 is 50, key is test_row_0/C:col10/1732778574603/Put/seqid=0
2024-11-28T07:22:55,266 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:22:55,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119
2024-11-28T07:22:55,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing
2024-11-28T07:22:55,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119
java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:55,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119
java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:55,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=119
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:22:55,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742252_1428 (size=12001)
2024-11-28T07:22:55,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b87fbbb61ae54c3bbedec1ba9e821ae3
2024-11-28T07:22:55,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/549c1e18603d4d1185007db3df4e0cd1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1
2024-11-28T07:22:55,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778635269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778635270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778635270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778635271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778635271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1, entries=200, sequenceid=116, filesize=38.6 K
2024-11-28T07:22:55,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e3fe6557aae44c46a766d9981d5a7132 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132
2024-11-28T07:22:55,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132, entries=150, sequenceid=116, filesize=11.7 K
2024-11-28T07:22:55,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b87fbbb61ae54c3bbedec1ba9e821ae3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3
2024-11-28T07:22:55,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3, entries=150, sequenceid=116, filesize=11.7 K
2024-11-28T07:22:55,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 707ms, sequenceid=116, compaction requested=true
2024-11-28T07:22:55,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00:
2024-11-28T07:22:55,313 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:22:55,314 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101597 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:22:55,314 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files)
2024-11-28T07:22:55,314 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,314 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=99.2 K
2024-11-28T07:22:55,314 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,314 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1]
2024-11-28T07:22:55,315 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 185daed9bdea4dc6906df0332ea97908, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840
2024-11-28T07:22:55,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1
2024-11-28T07:22:55,315 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d22273e218114309b51923a0374185b7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732778573022
2024-11-28T07:22:55,316 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 549c1e18603d4d1185007db3df4e0cd1, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252
2024-11-28T07:22:55,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:22:55,319 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:22:55,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2
2024-11-28T07:22:55,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:22:55,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3
2024-11-28T07:22:55,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T07:22:55,321 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:22:55,321 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files)
2024-11-28T07:22:55,321 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,321 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7d15c14ebdc84fb8ab4a925be27a8136, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.3 K
2024-11-28T07:22:55,322 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d15c14ebdc84fb8ab4a925be27a8136, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840
2024-11-28T07:22:55,323 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ff7f40e45d446c6876b34d2be0556cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732778573022
2024-11-28T07:22:55,323 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e3fe6557aae44c46a766d9981d5a7132, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252
2024-11-28T07:22:55,333 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:55,351 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#368 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:22:55,352 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/1200a41591e641769e97ae7ed188a1a5 is 50, key is test_row_0/B:col10/1732778574603/Put/seqid=0
2024-11-28T07:22:55,361 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128e54bf3e4c8ac44468494f30f2221b1ce_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:55,363 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128e54bf3e4c8ac44468494f30f2221b1ce_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:55,363 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e54bf3e4c8ac44468494f30f2221b1ce_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:55,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-28T07:22:55,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:22:55,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,421 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C
2024-11-28T07:22:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:22:55,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742253_1429 (size=12241)
2024-11-28T07:22:55,431 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/1200a41591e641769e97ae7ed188a1a5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1200a41591e641769e97ae7ed188a1a5
2024-11-28T07:22:55,437 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 1200a41591e641769e97ae7ed188a1a5(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:22:55,437 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00:
2024-11-28T07:22:55,437 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778575319; duration=0sec
2024-11-28T07:22:55,437 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T07:22:55,437 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B
2024-11-28T07:22:55,437 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:22:55,440 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:22:55,440 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files)
2024-11-28T07:22:55,440 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.
2024-11-28T07:22:55,440 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f680b2e931544ee7bde9e73bc2a8f3eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.3 K
2024-11-28T07:22:55,440 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f680b2e931544ee7bde9e73bc2a8f3eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778571840
2024-11-28T07:22:55,446 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting be5e60a51cd24357a7490ee5e747f419, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732778573022
2024-11-28T07:22:55,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742254_1430 (size=4469)
2024-11-28T07:22:55,446 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b87fbbb61ae54c3bbedec1ba9e821ae3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252
2024-11-28T07:22:55,447 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#367 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:22:55,448 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4b269ff1f923457bb6e8ba6baebdd19e is 175, key is test_row_0/A:col10/1732778574603/Put/seqid=0
2024-11-28T07:22:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286d4994d185994c1ea2840cf1e69036fc_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778574610/Put/seqid=0
2024-11-28T07:22:55,500 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#370 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:22:55,500 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e6ff40602c2e4743b4cb9c86c1a8749f is 50, key is test_row_0/C:col10/1732778574603/Put/seqid=0
2024-11-28T07:22:55,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742255_1431 (size=31195)
2024-11-28T07:22:55,528 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4b269ff1f923457bb6e8ba6baebdd19e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e
2024-11-28T07:22:55,534 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 4b269ff1f923457bb6e8ba6baebdd19e(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:22:55,534 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00:
2024-11-28T07:22:55,534 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778575313; duration=0sec
2024-11-28T07:22:55,534 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:22:55,534 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A
2024-11-28T07:22:55,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742256_1432 (size=12154)
2024-11-28T07:22:55,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:22:55,582 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286d4994d185994c1ea2840cf1e69036fc_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286d4994d185994c1ea2840cf1e69036fc_92fcea6fc878b4b1c7f03e0a8e3d3d00
2024-11-28T07:22:55,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742257_1433 (size=12241)
2024-11-28T07:22:55,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/fc5850c309304d1fb33f81f8471ccac8, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00]
2024-11-28T07:22:55,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/fc5850c309304d1fb33f81f8471ccac8 is 175, key is test_row_0/A:col10/1732778574610/Put/seqid=0
2024-11-28T07:22:55,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742258_1434 (size=30955)
2024-11-28T07:22:55,639 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/fc5850c309304d1fb33f81f8471ccac8
2024-11-28T07:22:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/204a6e0146974d2ab2305140da7e0b1b is 50, key is test_row_0/B:col10/1732778574610/Put/seqid=0
2024-11-28T07:22:55,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742259_1435 (size=12001)
2024-11-28T07:22:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00
2024-11-28T07:22:55,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing
2024-11-28T07:22:55,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778635831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778635833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778635835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778635836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778635839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-28T07:22:55,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778635942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778635942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778635949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778635949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:22:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778635949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488
2024-11-28T07:22:55,994 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e6ff40602c2e4743b4cb9c86c1a8749f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e6ff40602c2e4743b4cb9c86c1a8749f
2024-11-28T07:22:55,999 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into e6ff40602c2e4743b4cb9c86c1a8749f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:22:56,000 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:56,000 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778575319; duration=0sec 2024-11-28T07:22:56,000 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:56,000 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:22:56,107 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/204a6e0146974d2ab2305140da7e0b1b 2024-11-28T07:22:56,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/2f2b0843cf724ef1956bcda1f064710a is 50, key is test_row_0/C:col10/1732778574610/Put/seqid=0 2024-11-28T07:22:56,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742260_1436 (size=12001) 2024-11-28T07:22:56,161 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/2f2b0843cf724ef1956bcda1f064710a 2024-11-28T07:22:56,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778636153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778636153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/fc5850c309304d1fb33f81f8471ccac8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8 2024-11-28T07:22:56,172 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8, entries=150, sequenceid=130, filesize=30.2 K 2024-11-28T07:22:56,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/204a6e0146974d2ab2305140da7e0b1b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b 2024-11-28T07:22:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778636162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778636162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778636163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,179 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b, entries=150, sequenceid=130, filesize=11.7 K 2024-11-28T07:22:56,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/2f2b0843cf724ef1956bcda1f064710a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a 2024-11-28T07:22:56,185 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a, entries=150, sequenceid=130, filesize=11.7 K 2024-11-28T07:22:56,186 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 765ms, sequenceid=130, compaction requested=false 2024-11-28T07:22:56,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:56,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:56,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-28T07:22:56,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-28T07:22:56,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-28T07:22:56,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3800 sec 2024-11-28T07:22:56,190 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.3840 sec 2024-11-28T07:22:56,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:56,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:56,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128910b819149664578bd70c5a7949c3fb0_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778636493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778636493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778636495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778636507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778636507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742261_1437 (size=12304) 2024-11-28T07:22:56,541 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:56,546 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128910b819149664578bd70c5a7949c3fb0_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128910b819149664578bd70c5a7949c3fb0_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:56,547 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/97f223726ad8436ab7a7aea8531e6216, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:56,547 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/97f223726ad8436ab7a7aea8531e6216 is 175, key is test_row_0/A:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:56,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is 
added to blk_1073742262_1438 (size=31105) 2024-11-28T07:22:56,606 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/97f223726ad8436ab7a7aea8531e6216 2024-11-28T07:22:56,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778636610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778636611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778636611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0308c4a2f2c04541bf897ff2281d8ace is 50, key is test_row_0/B:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778636624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778636625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742263_1439 (size=12151) 2024-11-28T07:22:56,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0308c4a2f2c04541bf897ff2281d8ace 2024-11-28T07:22:56,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/306f49505a08410cb23195a42ee7d86c is 50, key is test_row_0/C:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:56,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742264_1440 (size=12151) 2024-11-28T07:22:56,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778636823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778636825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778636825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778636836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:56,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778636839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:56,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T07:22:56,912 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-28T07:22:56,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:22:56,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-28T07:22:56,922 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:22:56,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:22:56,922 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:22:56,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:22:57,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=120 2024-11-28T07:22:57,074 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/306f49505a08410cb23195a42ee7d86c 2024-11-28T07:22:57,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/97f223726ad8436ab7a7aea8531e6216 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216 2024-11-28T07:22:57,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778637135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216, entries=150, sequenceid=158, filesize=30.4 K 2024-11-28T07:22:57,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0308c4a2f2c04541bf897ff2281d8ace as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace 2024-11-28T07:22:57,145 
WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778637136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778637137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace, entries=150, sequenceid=158, filesize=11.9 K 2024-11-28T07:22:57,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/306f49505a08410cb23195a42ee7d86c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c 2024-11-28T07:22:57,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c, entries=150, sequenceid=158, filesize=11.9 K 2024-11-28T07:22:57,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 677ms, sequenceid=158, compaction requested=true 2024-11-28T07:22:57,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:57,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:57,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:57,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:22:57,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:57,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=91.1 K 2024-11-28T07:22:57,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,157 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216] 2024-11-28T07:22:57,157 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b269ff1f923457bb6e8ba6baebdd19e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252 2024-11-28T07:22:57,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc5850c309304d1fb33f81f8471ccac8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778574610 2024-11-28T07:22:57,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97f223726ad8436ab7a7aea8531e6216, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:57,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:57,164 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:57,165 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:57,165 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:22:57,166 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,166 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1200a41591e641769e97ae7ed188a1a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.5 K 2024-11-28T07:22:57,166 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1200a41591e641769e97ae7ed188a1a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252 2024-11-28T07:22:57,166 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 204a6e0146974d2ab2305140da7e0b1b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778574610 2024-11-28T07:22:57,167 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0308c4a2f2c04541bf897ff2281d8ace, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:57,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:57,172 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:57,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:57,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:57,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:57,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:57,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:57,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:57,182 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:57,183 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/282523c803de4decac59d963b769cb3c is 50, key is test_row_0/B:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:57,198 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411280b884688bdbb47d5b685a270e3c139c5_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:57,200 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411280b884688bdbb47d5b685a270e3c139c5_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:57,200 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280b884688bdbb47d5b685a270e3c139c5_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:57,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:22:57,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,229 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280bb948ca685d4b709dae6e15a1a8d019_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778577169/Put/seqid=0 2024-11-28T07:22:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742265_1441 (size=12493) 2024-11-28T07:22:57,272 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/282523c803de4decac59d963b769cb3c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/282523c803de4decac59d963b769cb3c 2024-11-28T07:22:57,277 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 282523c803de4decac59d963b769cb3c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:57,277 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:57,277 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778577164; duration=0sec 2024-11-28T07:22:57,277 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:57,277 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:22:57,277 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:57,278 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:57,279 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:22:57,279 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:57,279 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e6ff40602c2e4743b4cb9c86c1a8749f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.5 K 2024-11-28T07:22:57,279 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e6ff40602c2e4743b4cb9c86c1a8749f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732778574252 2024-11-28T07:22:57,279 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f2b0843cf724ef1956bcda1f064710a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778574610 2024-11-28T07:22:57,280 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 306f49505a08410cb23195a42ee7d86c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742266_1442 (size=4469) 2024-11-28T07:22:57,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778637291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,315 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:57,316 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f690960bc2f44f43b08adfc8e792c2e2 is 50, key is test_row_0/C:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:57,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778637304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742267_1443 (size=17284) 2024-11-28T07:22:57,337 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:57,342 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280bb948ca685d4b709dae6e15a1a8d019_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280bb948ca685d4b709dae6e15a1a8d019_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:57,343 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8787b47d0aed467fb4f6c7b17fc82ca5, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:57,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8787b47d0aed467fb4f6c7b17fc82ca5 is 175, key is test_row_0/A:col10/1732778577169/Put/seqid=0 2024-11-28T07:22:57,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742268_1444 (size=12493) 2024-11-28T07:22:57,381 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,382 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:57,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742269_1445 (size=48389) 2024-11-28T07:22:57,405 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8787b47d0aed467fb4f6c7b17fc82ca5 2024-11-28T07:22:57,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d9e65b3c242f45af95591c0a23d97dd8 is 50, key is test_row_0/B:col10/1732778577169/Put/seqid=0 2024-11-28T07:22:57,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778637414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778637427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742270_1446 (size=12151) 2024-11-28T07:22:57,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:22:57,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778637627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778637646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778637646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778637650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778637650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,693 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#376 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:57,694 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/e02959ff9816405e881e05bc2840136f is 175, key is test_row_0/A:col10/1732778575829/Put/seqid=0 2024-11-28T07:22:57,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742271_1447 (size=31447) 2024-11-28T07:22:57,779 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f690960bc2f44f43b08adfc8e792c2e2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f690960bc2f44f43b08adfc8e792c2e2 2024-11-28T07:22:57,789 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into f690960bc2f44f43b08adfc8e792c2e2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:57,789 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:57,789 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778577173; duration=0sec 2024-11-28T07:22:57,789 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:57,789 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:22:57,843 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:57,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d9e65b3c242f45af95591c0a23d97dd8 2024-11-28T07:22:57,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/55ba571c27e3463f9eaa9f91686ef3dd is 50, key is test_row_0/C:col10/1732778577169/Put/seqid=0 2024-11-28T07:22:57,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778637938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742272_1448 (size=12151) 2024-11-28T07:22:57,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778637957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:57,997 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:57,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:57,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:57,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:57,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:57,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:57,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:22:58,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,157 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/e02959ff9816405e881e05bc2840136f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f 2024-11-28T07:22:58,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:58,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,197 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into e02959ff9816405e881e05bc2840136f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 1sec to execute. 2024-11-28T07:22:58,197 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:58,197 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778577155; duration=1sec 2024-11-28T07:22:58,198 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:58,198 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:22:58,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/55ba571c27e3463f9eaa9f91686ef3dd 2024-11-28T07:22:58,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8787b47d0aed467fb4f6c7b17fc82ca5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5 2024-11-28T07:22:58,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5, entries=250, sequenceid=171, filesize=47.3 K 2024-11-28T07:22:58,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d9e65b3c242f45af95591c0a23d97dd8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8 2024-11-28T07:22:58,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8, entries=150, sequenceid=171, filesize=11.9 K 2024-11-28T07:22:58,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/55ba571c27e3463f9eaa9f91686ef3dd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd 2024-11-28T07:22:58,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd, entries=150, sequenceid=171, filesize=11.9 K 2024-11-28T07:22:58,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1234ms, sequenceid=171, compaction requested=false 2024-11-28T07:22:58,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:58,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:22:58,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:58,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:58,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:58,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280752904ea4ff4d8a9e4b37257b356e6f_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:58,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778638494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778638513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742273_1449 (size=14794) 2024-11-28T07:22:58,557 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:58,561 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280752904ea4ff4d8a9e4b37257b356e6f_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280752904ea4ff4d8a9e4b37257b356e6f_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:58,562 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2dd988d49c644a6db835bf56f4ee6b23, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:58,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2dd988d49c644a6db835bf56f4ee6b23 is 175, key is test_row_0/A:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:58,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742274_1450 (size=39749) 2024-11-28T07:22:58,614 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2dd988d49c644a6db835bf56f4ee6b23 2024-11-28T07:22:58,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778638614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778638623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b31416096664478dbcb414bc3f1cdc9c is 50, key is test_row_0/B:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:58,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778638659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778638672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778638685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742275_1451 (size=12151) 2024-11-28T07:22:58,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b31416096664478dbcb414bc3f1cdc9c 2024-11-28T07:22:58,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/17d691049f90413cb0beb4ca0e47f116 is 50, key is test_row_0/C:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:58,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742276_1452 (size=12151) 2024-11-28T07:22:58,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/17d691049f90413cb0beb4ca0e47f116 2024-11-28T07:22:58,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:58,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2dd988d49c644a6db835bf56f4ee6b23 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23 2024-11-28T07:22:58,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23, entries=200, sequenceid=198, filesize=38.8 K 2024-11-28T07:22:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b31416096664478dbcb414bc3f1cdc9c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c 2024-11-28T07:22:58,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c, entries=150, sequenceid=198, filesize=11.9 K 2024-11-28T07:22:58,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/17d691049f90413cb0beb4ca0e47f116 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116 2024-11-28T07:22:58,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116, entries=150, sequenceid=198, filesize=11.9 K 2024-11-28T07:22:58,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 343ms, sequenceid=198, compaction requested=true 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:58,806 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:22:58,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:22:58,807 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:58,807 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:58,807 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:22:58,807 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:58,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=116.8 K 2024-11-28T07:22:58,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23] 2024-11-28T07:22:58,808 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:58,808 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:22:58,808 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:58,809 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/282523c803de4decac59d963b769cb3c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.9 K 2024-11-28T07:22:58,809 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e02959ff9816405e881e05bc2840136f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:58,810 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 282523c803de4decac59d963b769cb3c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:58,810 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8787b47d0aed467fb4f6c7b17fc82ca5, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732778576478 2024-11-28T07:22:58,810 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9e65b3c242f45af95591c0a23d97dd8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732778576478 2024-11-28T07:22:58,811 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dd988d49c644a6db835bf56f4ee6b23, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:22:58,811 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b31416096664478dbcb414bc3f1cdc9c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:22:58,839 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:58,840 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:58,841 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/029a63fb351e40bda495cd6f6e08c1ca is 50, key is test_row_0/B:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:58,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:58,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:58,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:58,861 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411287d8c48fe326147639d7a97cf01ec66ec_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:58,863 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411287d8c48fe326147639d7a97cf01ec66ec_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:58,863 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287d8c48fe326147639d7a97cf01ec66ec_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:58,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:58,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:58,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:58,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:58,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:58,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:58,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b9792ad773454eff8be202dae41fd0b7_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778578843/Put/seqid=0 2024-11-28T07:22:58,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742277_1453 (size=12595) 2024-11-28T07:22:58,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742278_1454 (size=4469) 2024-11-28T07:22:58,974 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#386 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:58,974 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/31aba582bbbf4d97a6e5cfd730d91eb3 is 175, key is test_row_0/A:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:59,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742279_1455 (size=17284) 2024-11-28T07:22:59,002 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:59,007 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b9792ad773454eff8be202dae41fd0b7_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b9792ad773454eff8be202dae41fd0b7_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:59,008 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/6382666a52b24daa9dfe7ddec2894c4b, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:59,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/6382666a52b24daa9dfe7ddec2894c4b is 175, key is test_row_0/A:col10/1732778578843/Put/seqid=0 2024-11-28T07:22:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742280_1456 (size=31549) 2024-11-28T07:22:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:22:59,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,027 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/31aba582bbbf4d97a6e5cfd730d91eb3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3 2024-11-28T07:22:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778639014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,031 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 31aba582bbbf4d97a6e5cfd730d91eb3(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:59,032 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:59,032 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778578806; duration=0sec 2024-11-28T07:22:59,032 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:22:59,032 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:22:59,032 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:22:59,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778639015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,035 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:22:59,035 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:22:59,035 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:22:59,035 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f690960bc2f44f43b08adfc8e792c2e2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=35.9 K 2024-11-28T07:22:59,036 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f690960bc2f44f43b08adfc8e792c2e2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732778575829 2024-11-28T07:22:59,036 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 55ba571c27e3463f9eaa9f91686ef3dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732778576478 2024-11-28T07:22:59,037 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 17d691049f90413cb0beb4ca0e47f116, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:22:59,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742281_1457 (size=48389) 2024-11-28T07:22:59,056 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/6382666a52b24daa9dfe7ddec2894c4b 2024-11-28T07:22:59,061 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#388 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:22:59,061 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b93f9cc4e5b249de826cf6efccd2f43a is 50, key is test_row_0/C:col10/1732778578461/Put/seqid=0 2024-11-28T07:22:59,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b89b175c192247c4a4cfcc7fed19f40b is 50, key is test_row_0/B:col10/1732778578843/Put/seqid=0 2024-11-28T07:22:59,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:59,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:59,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:59,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:59,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:59,092 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:22:59,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:22:59,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742282_1458 (size=12595) 2024-11-28T07:22:59,113 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b93f9cc4e5b249de826cf6efccd2f43a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b93f9cc4e5b249de826cf6efccd2f43a 2024-11-28T07:22:59,119 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into b93f9cc4e5b249de826cf6efccd2f43a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:59,119 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:59,119 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778578806; duration=0sec 2024-11-28T07:22:59,119 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:59,119 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:22:59,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742283_1459 (size=12151) 2024-11-28T07:22:59,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b89b175c192247c4a4cfcc7fed19f40b 2024-11-28T07:22:59,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778639128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778639135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/5829f32b1b4b43729d7e4b08de536eba is 50, key is test_row_0/C:col10/1732778578843/Put/seqid=0 2024-11-28T07:22:59,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742284_1460 (size=12151) 2024-11-28T07:22:59,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/5829f32b1b4b43729d7e4b08de536eba 2024-11-28T07:22:59,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/6382666a52b24daa9dfe7ddec2894c4b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b 2024-11-28T07:22:59,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b, entries=250, sequenceid=212, filesize=47.3 K 2024-11-28T07:22:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/b89b175c192247c4a4cfcc7fed19f40b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b 2024-11-28T07:22:59,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b, entries=150, sequenceid=212, filesize=11.9 K 2024-11-28T07:22:59,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/5829f32b1b4b43729d7e4b08de536eba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba 2024-11-28T07:22:59,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba, entries=150, sequenceid=212, filesize=11.9 K 2024-11-28T07:22:59,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 386ms, sequenceid=212, compaction requested=false 2024-11-28T07:22:59,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:59,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:22:59,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T07:22:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:22:59,245 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:22:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:22:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:22:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:22:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:22:59,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112861ddac9e63b441dbb8c018f5c109abcb_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778579012/Put/seqid=0 2024-11-28T07:22:59,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742285_1461 (size=12304) 2024-11-28T07:22:59,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:22:59,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:59,369 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/029a63fb351e40bda495cd6f6e08c1ca as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/029a63fb351e40bda495cd6f6e08c1ca 2024-11-28T07:22:59,382 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 029a63fb351e40bda495cd6f6e08c1ca(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:22:59,382 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:22:59,382 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778578806; duration=0sec 2024-11-28T07:22:59,382 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:22:59,382 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:22:59,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778639401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778639402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778639512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778639512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778639719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:22:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778639721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:22:59,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:22:59,728 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112861ddac9e63b441dbb8c018f5c109abcb_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861ddac9e63b441dbb8c018f5c109abcb_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:22:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/51521b374a1240b991c6ac2e05bd553a, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:22:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/51521b374a1240b991c6ac2e05bd553a is 175, key is test_row_0/A:col10/1732778579012/Put/seqid=0 2024-11-28T07:22:59,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742286_1462 (size=31105) 2024-11-28T07:22:59,768 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/51521b374a1240b991c6ac2e05bd553a 2024-11-28T07:22:59,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/22a28b4df90e456eb16c9f891a69dbc1 is 50, key is test_row_0/B:col10/1732778579012/Put/seqid=0 2024-11-28T07:22:59,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742287_1463 (size=12151) 2024-11-28T07:23:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778640024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778640026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,223 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/22a28b4df90e456eb16c9f891a69dbc1 2024-11-28T07:23:00,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e2d78920b5a04beca995489d1b85960a is 50, key is test_row_0/C:col10/1732778579012/Put/seqid=0 2024-11-28T07:23:00,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742288_1464 (size=12151) 2024-11-28T07:23:00,301 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e2d78920b5a04beca995489d1b85960a 2024-11-28T07:23:00,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/51521b374a1240b991c6ac2e05bd553a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a 2024-11-28T07:23:00,315 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a, entries=150, sequenceid=236, filesize=30.4 K 2024-11-28T07:23:00,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/22a28b4df90e456eb16c9f891a69dbc1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1 2024-11-28T07:23:00,320 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1, entries=150, sequenceid=236, filesize=11.9 K 2024-11-28T07:23:00,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e2d78920b5a04beca995489d1b85960a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a 2024-11-28T07:23:00,324 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a, entries=150, sequenceid=236, filesize=11.9 K 2024-11-28T07:23:00,325 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1080ms, sequenceid=236, compaction requested=true 2024-11-28T07:23:00,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:00,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:00,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-28T07:23:00,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-28T07:23:00,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-28T07:23:00,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4040 sec 2024-11-28T07:23:00,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 3.4090 sec 2024-11-28T07:23:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:00,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T07:23:00,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:00,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:00,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:00,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112810dd9c4cb1394617a83cf3fd6ca798c1_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742289_1465 (size=17284) 2024-11-28T07:23:00,605 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:00,611 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112810dd9c4cb1394617a83cf3fd6ca798c1_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112810dd9c4cb1394617a83cf3fd6ca798c1_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:00,613 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7c038f6003e6405096206ad73c7a3682, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:00,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7c038f6003e6405096206ad73c7a3682 is 175, key is test_row_0/A:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778640619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778640622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742290_1466 (size=48389) 2024-11-28T07:23:00,661 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7c038f6003e6405096206ad73c7a3682 2024-11-28T07:23:00,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778640669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,678 DEBUG [Thread-1784 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:00,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/13d76b9e95f747d7addf53264d4db3ba is 50, key is test_row_0/B:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778640700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,705 DEBUG [Thread-1786 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4210 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:00,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742291_1467 (size=12151) 2024-11-28T07:23:00,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/13d76b9e95f747d7addf53264d4db3ba 2024-11-28T07:23:00,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f03f63780e1d4db09904765e03152d32 is 50, key is test_row_0/C:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778640728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778640729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,748 DEBUG [Thread-1780 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4255 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:00,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:00,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778640738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:00,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742292_1468 (size=12151) 2024-11-28T07:23:00,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f03f63780e1d4db09904765e03152d32 2024-11-28T07:23:00,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7c038f6003e6405096206ad73c7a3682 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682 2024-11-28T07:23:00,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682, entries=250, sequenceid=254, filesize=47.3 K 2024-11-28T07:23:00,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/13d76b9e95f747d7addf53264d4db3ba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba 2024-11-28T07:23:00,786 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T07:23:00,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/f03f63780e1d4db09904765e03152d32 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32 2024-11-28T07:23:00,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T07:23:00,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 261ms, sequenceid=254, compaction requested=true 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:00,798 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:00,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:00,798 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:00,805 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:00,805 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159432 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:00,805 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:00,805 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:00,805 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:00,805 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:00,806 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=155.7 K 2024-11-28T07:23:00,806 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/029a63fb351e40bda495cd6f6e08c1ca, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=47.9 K 2024-11-28T07:23:00,806 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
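The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from the region server refusing writes once a region's memstore passes its blocking threshold, while the client's RpcRetryingCallerImpl keeps backing off and retrying (tries=6, retries=16). A minimal sketch of the knobs behind that behaviour, assuming the stock HBase configuration keys and client API; the concrete values are illustrative assumptions chosen only to mirror the 512 K limit seen in this run, not settings read from the test itself.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();

    // Region-server-side settings, shown here only to illustrate where the limit comes from:
    // writes are rejected with RegionTooBusyException once the region memstore reaches
    // flush.size * block.multiplier, e.g. 128 K * 4 = the 512 K limit logged above (assumed values).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    // Client-side retry policy that produces the "tries=N, retries=16" lines above:
    // each RegionTooBusyException is retried with increasing backoff until retries run out.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // base backoff in milliseconds

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // Reaching this point means the retrying caller exhausted its retries while the
        // region stayed over its memstore limit (or some other I/O error occurred).
        System.err.println("put failed after retries: " + e.getMessage());
      }
    }
  }
}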
2024-11-28T07:23:00,806 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682] 2024-11-28T07:23:00,806 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 029a63fb351e40bda495cd6f6e08c1ca, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:23:00,806 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31aba582bbbf4d97a6e5cfd730d91eb3, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:23:00,807 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6382666a52b24daa9dfe7ddec2894c4b, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732778578467 2024-11-28T07:23:00,807 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b89b175c192247c4a4cfcc7fed19f40b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732778578478 2024-11-28T07:23:00,807 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51521b374a1240b991c6ac2e05bd553a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732778578969 2024-11-28T07:23:00,807 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 22a28b4df90e456eb16c9f891a69dbc1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732778578969 2024-11-28T07:23:00,807 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c038f6003e6405096206ad73c7a3682, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:00,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 13d76b9e95f747d7addf53264d4db3ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:00,834 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#397 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:00,834 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/cba77e6548404d5ab8c3d9cf62fe0f8f is 50, key is test_row_0/B:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,840 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:00,865 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411287cb72cde0a44402286067b42f291dcdc_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:00,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411287cb72cde0a44402286067b42f291dcdc_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:00,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287cb72cde0a44402286067b42f291dcdc_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742293_1469 (size=12731) 2024-11-28T07:23:00,891 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/cba77e6548404d5ab8c3d9cf62fe0f8f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/cba77e6548404d5ab8c3d9cf62fe0f8f 2024-11-28T07:23:00,896 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into cba77e6548404d5ab8c3d9cf62fe0f8f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
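The compaction activity above (ExploringCompactionPolicy "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", followed by a minor compaction that merged four B-store files into one) is governed by a handful of store-level settings. The sketch below lists them with their stock defaults, assuming the standard configuration keys; the values are illustrative and not read from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // ExploringCompactionPolicy looks for a set of store files whose sizes satisfy the
    // ratio check; the "4 eligible, 16 blocking" figures above line up with these knobs.
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in one compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // writes to the region are delayed once a store reaches this many files

    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}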
2024-11-28T07:23:00,896 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:00,896 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=12, startTime=1732778580798; duration=0sec 2024-11-28T07:23:00,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:00,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:00,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:00,898 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:00,898 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:00,899 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:00,899 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b93f9cc4e5b249de826cf6efccd2f43a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=47.9 K 2024-11-28T07:23:00,899 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b93f9cc4e5b249de826cf6efccd2f43a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732778577273 2024-11-28T07:23:00,899 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5829f32b1b4b43729d7e4b08de536eba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732778578478 2024-11-28T07:23:00,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e2d78920b5a04beca995489d1b85960a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=236, earliestPutTs=1732778578969 2024-11-28T07:23:00,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f03f63780e1d4db09904765e03152d32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:00,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742294_1470 (size=4469) 2024-11-28T07:23:00,921 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:00,922 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/4acec8093e6a45f9a4739dae2404dfb0 is 50, key is test_row_0/C:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,923 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#398 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:00,924 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8485331e35d44ad581be838914312f72 is 175, key is test_row_0/A:col10/1732778580537/Put/seqid=0 2024-11-28T07:23:00,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742295_1471 (size=12731) 2024-11-28T07:23:00,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742296_1472 (size=31685) 2024-11-28T07:23:00,964 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8485331e35d44ad581be838914312f72 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72 2024-11-28T07:23:00,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T07:23:00,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:00,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:00,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,965 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:00,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:00,971 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 8485331e35d44ad581be838914312f72(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:00,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:00,971 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=12, startTime=1732778580798; duration=0sec 2024-11-28T07:23:00,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:00,971 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:00,972 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/4acec8093e6a45f9a4739dae2404dfb0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/4acec8093e6a45f9a4739dae2404dfb0 2024-11-28T07:23:00,977 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 4acec8093e6a45f9a4739dae2404dfb0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
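Besides the memstore-pressure flushes recorded above (HRegion "Flushing ... 3/3 column families" followed by per-family flushes and compaction requests), the FlushTableProcedure entries just below (pid=122) correspond to an explicit flush of TestAcidGuarantees requested through the master. A minimal sketch of issuing such a flush from a client, assuming the standard Admin API:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table: the master schedules a
      // FlushTableProcedure with one FlushRegionProcedure per region, and each region
      // server writes its memstores out as new HFiles, one per column family.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}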
2024-11-28T07:23:00,977 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:00,977 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=12, startTime=1732778580798; duration=0sec 2024-11-28T07:23:00,977 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:00,977 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:00,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128119a7a5ee1c94c0e93c3b1b1f28502be_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778580618/Put/seqid=0 2024-11-28T07:23:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T07:23:01,028 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-28T07:23:01,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:01,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-11-28T07:23:01,031 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:01,033 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:01,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:01,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742297_1473 (size=14994) 2024-11-28T07:23:01,060 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:01,065 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128119a7a5ee1c94c0e93c3b1b1f28502be_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128119a7a5ee1c94c0e93c3b1b1f28502be_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:01,066 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7da066bb9fd84ee2b25bac726083e94e, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:01,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7da066bb9fd84ee2b25bac726083e94e is 175, key is test_row_0/A:col10/1732778580618/Put/seqid=0 2024-11-28T07:23:01,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778641058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778641069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742298_1474 (size=39949) 2024-11-28T07:23:01,120 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7da066bb9fd84ee2b25bac726083e94e 2024-11-28T07:23:01,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:01,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/c120dc586ce14cdd9c728477104a132f is 50, key is test_row_0/B:col10/1732778580618/Put/seqid=0 2024-11-28T07:23:01,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778641170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742299_1475 (size=12301) 2024-11-28T07:23:01,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/c120dc586ce14cdd9c728477104a132f 2024-11-28T07:23:01,191 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:01,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-28T07:23:01,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:01,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:01,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:01,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:01,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:01,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:01,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778641187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/1d05da20c84341eea932df9d8ac77f59 is 50, key is test_row_0/C:col10/1732778580618/Put/seqid=0 2024-11-28T07:23:01,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742300_1476 (size=12301) 2024-11-28T07:23:01,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/1d05da20c84341eea932df9d8ac77f59 2024-11-28T07:23:01,251 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/7da066bb9fd84ee2b25bac726083e94e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e 2024-11-28T07:23:01,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e, entries=200, sequenceid=275, filesize=39.0 K 2024-11-28T07:23:01,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/c120dc586ce14cdd9c728477104a132f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f 2024-11-28T07:23:01,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f, entries=150, sequenceid=275, filesize=12.0 K 2024-11-28T07:23:01,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/1d05da20c84341eea932df9d8ac77f59 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59 2024-11-28T07:23:01,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59, entries=150, sequenceid=275, filesize=12.0 K 2024-11-28T07:23:01,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 303ms, sequenceid=275, compaction requested=false 2024-11-28T07:23:01,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:01,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:01,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-11-28T07:23:01,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:01,348 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:01,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:01,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e785c8d9dde4831a673cb7f0639f38c_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778581065/Put/seqid=0 2024-11-28T07:23:01,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742301_1477 (size=12454) 2024-11-28T07:23:01,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:01,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:23:01,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:01,402 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e785c8d9dde4831a673cb7f0639f38c_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e785c8d9dde4831a673cb7f0639f38c_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:01,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c254e8d90834debbcc765709f6122a7, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:01,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c254e8d90834debbcc765709f6122a7 is 175, key is test_row_0/A:col10/1732778581065/Put/seqid=0 2024-11-28T07:23:01,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742302_1478 (size=31255) 2024-11-28T07:23:01,444 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c254e8d90834debbcc765709f6122a7 2024-11-28T07:23:01,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5d1bb377af394d7093bf2ee41013347e is 50, key is test_row_0/B:col10/1732778581065/Put/seqid=0 2024-11-28T07:23:01,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742303_1479 (size=12301) 2024-11-28T07:23:01,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778641492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778641505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778641607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778641607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:01,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778641820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:01,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778641824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:01,876 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5d1bb377af394d7093bf2ee41013347e 2024-11-28T07:23:01,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/48b466a6a0704d93abded583a7fd76f6 is 50, key is test_row_0/C:col10/1732778581065/Put/seqid=0 2024-11-28T07:23:01,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742304_1480 (size=12301) 2024-11-28T07:23:02,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778642128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:02,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778642151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:02,315 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/48b466a6a0704d93abded583a7fd76f6 2024-11-28T07:23:02,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c254e8d90834debbcc765709f6122a7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7 2024-11-28T07:23:02,327 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7, entries=150, sequenceid=293, filesize=30.5 K 2024-11-28T07:23:02,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5d1bb377af394d7093bf2ee41013347e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e 2024-11-28T07:23:02,333 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e, entries=150, sequenceid=293, filesize=12.0 K 2024-11-28T07:23:02,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/48b466a6a0704d93abded583a7fd76f6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6 2024-11-28T07:23:02,342 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6, entries=150, sequenceid=293, filesize=12.0 K 2024-11-28T07:23:02,343 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 995ms, sequenceid=293, compaction requested=true 2024-11-28T07:23:02,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:02,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:02,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-28T07:23:02,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-11-28T07:23:02,346 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-28T07:23:02,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3110 sec 2024-11-28T07:23:02,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.3180 sec 2024-11-28T07:23:02,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:02,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:02,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:02,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112822fdc2bc7a704f0c98d45bb64bad22a0_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:02,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742305_1481 (size=14994) 2024-11-28T07:23:02,684 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:02,690 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112822fdc2bc7a704f0c98d45bb64bad22a0_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112822fdc2bc7a704f0c98d45bb64bad22a0_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:02,692 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d1b3905de4034b8d84f146d7e2612640, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:02,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d1b3905de4034b8d84f146d7e2612640 is 175, key is test_row_0/A:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:02,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778642704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:02,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778642706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:02,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742306_1482 (size=39949) 2024-11-28T07:23:02,722 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d1b3905de4034b8d84f146d7e2612640 2024-11-28T07:23:02,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3347c07dacde47df93a66dc55257e67e is 50, key is test_row_0/B:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:02,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742307_1483 (size=12301) 2024-11-28T07:23:02,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778642807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:02,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:02,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778642818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:03,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778643020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:03,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778643030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T07:23:03,137 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-28T07:23:03,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:03,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-11-28T07:23:03,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:03,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:03,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:03,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:03,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3347c07dacde47df93a66dc55257e67e 2024-11-28T07:23:03,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:03,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/3fd6553a46164e0b8280359d415b3a7e is 50, key is test_row_0/C:col10/1732778581476/Put/seqid=0 
2024-11-28T07:23:03,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:03,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:03,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:03,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,294 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:03,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742308_1484 (size=12301) 2024-11-28T07:23:03,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:03,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778643329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:03,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778643338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:03,453 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:03,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:03,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:03,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:03,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:03,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/3fd6553a46164e0b8280359d415b3a7e 2024-11-28T07:23:03,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d1b3905de4034b8d84f146d7e2612640 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640 2024-11-28T07:23:03,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:03,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640, entries=200, sequenceid=314, filesize=39.0 K 2024-11-28T07:23:03,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/3347c07dacde47df93a66dc55257e67e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e 2024-11-28T07:23:03,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e, entries=150, sequenceid=314, filesize=12.0 K 2024-11-28T07:23:03,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/3fd6553a46164e0b8280359d415b3a7e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e 2024-11-28T07:23:03,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:03,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:03,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:23:03,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e, entries=150, sequenceid=314, filesize=12.0 K 2024-11-28T07:23:03,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1133ms, sequenceid=314, compaction requested=true 2024-11-28T07:23:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:03,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:03,776 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:03,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:03,776 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:03,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:03,776 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:03,777 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142838 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:03,777 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:03,777 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,777 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=139.5 K 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:03,778 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640] 2024-11-28T07:23:03,778 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,778 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/cba77e6548404d5ab8c3d9cf62fe0f8f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=48.5 K 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8485331e35d44ad581be838914312f72, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cba77e6548404d5ab8c3d9cf62fe0f8f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:03,778 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7da066bb9fd84ee2b25bac726083e94e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778580615 2024-11-28T07:23:03,780 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c120dc586ce14cdd9c728477104a132f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778580615 2024-11-28T07:23:03,780 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c254e8d90834debbcc765709f6122a7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732778581040 2024-11-28T07:23:03,780 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d1bb377af394d7093bf2ee41013347e, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732778581040 2024-11-28T07:23:03,781 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1b3905de4034b8d84f146d7e2612640, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581436 2024-11-28T07:23:03,781 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3347c07dacde47df93a66dc55257e67e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581476 2024-11-28T07:23:03,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#409 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:03,809 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/1972fa74062449daa5ecf6ea887c3dbe is 50, key is test_row_0/B:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:03,832 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:03,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:03,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T07:23:03,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:03,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:03,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:03,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:03,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:03,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:03,872 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411286a486c0f728e4b4c8d8924d69a554c58_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:03,875 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411286a486c0f728e4b4c8d8924d69a554c58_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:03,875 DEBUG 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286a486c0f728e4b4c8d8924d69a554c58_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742309_1485 (size=13017) 2024-11-28T07:23:03,887 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/1972fa74062449daa5ecf6ea887c3dbe as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1972fa74062449daa5ecf6ea887c3dbe 2024-11-28T07:23:03,893 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 1972fa74062449daa5ecf6ea887c3dbe(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:03,894 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:03,894 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=12, startTime=1732778583775; duration=0sec 2024-11-28T07:23:03,894 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:03,894 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:03,894 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:03,895 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:03,895 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:03,895 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:03,895 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/4acec8093e6a45f9a4739dae2404dfb0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=48.5 K 2024-11-28T07:23:03,896 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4acec8093e6a45f9a4739dae2404dfb0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732778579371 2024-11-28T07:23:03,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d05da20c84341eea932df9d8ac77f59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732778580615 2024-11-28T07:23:03,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 48b466a6a0704d93abded583a7fd76f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732778581040 2024-11-28T07:23:03,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fd6553a46164e0b8280359d415b3a7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581476 2024-11-28T07:23:03,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cec68c6c6e3c4f90bf8e084e9864b1af_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778582704/Put/seqid=0 2024-11-28T07:23:03,912 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#412 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:03,912 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/67efd88f290744fc84186963d2ff465e is 50, key is test_row_0/C:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:03,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742310_1486 (size=4469) 2024-11-28T07:23:03,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:03,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:03,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:03,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:03,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:03,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:03,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742311_1487 (size=12454) 2024-11-28T07:23:03,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:03,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778643980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742312_1488 (size=13017) 2024-11-28T07:23:04,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778643991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,008 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/67efd88f290744fc84186963d2ff465e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/67efd88f290744fc84186963d2ff465e 2024-11-28T07:23:04,013 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 67efd88f290744fc84186963d2ff465e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:04,013 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:04,013 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=12, startTime=1732778583776; duration=0sec 2024-11-28T07:23:04,013 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:04,013 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:04,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:04,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:04,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:04,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778644092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778644105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,227 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:04,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:04,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:04,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:04,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778644297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,318 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#410 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:04,318 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2caf495d2a40400dabd72be647985213 is 175, key is test_row_0/A:col10/1732778581476/Put/seqid=0 2024-11-28T07:23:04,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778644316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742313_1489 (size=31971) 2024-11-28T07:23:04,348 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:04,356 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cec68c6c6e3c4f90bf8e084e9864b1af_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cec68c6c6e3c4f90bf8e084e9864b1af_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:04,357 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/9475d2e8644f4bb9b25a8b1141d4bd8a, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/9475d2e8644f4bb9b25a8b1141d4bd8a is 175, key is test_row_0/A:col10/1732778582704/Put/seqid=0 2024-11-28T07:23:04,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:23:04,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742314_1490 (size=31255) 2024-11-28T07:23:04,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:04,383 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/9475d2e8644f4bb9b25a8b1141d4bd8a 2024-11-28T07:23:04,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0a8c561baf8143fe8a09aae374ccd419 is 50, key is test_row_0/B:col10/1732778582704/Put/seqid=0 2024-11-28T07:23:04,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742315_1491 (size=12301) 2024-11-28T07:23:04,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0a8c561baf8143fe8a09aae374ccd419 2024-11-28T07:23:04,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b51ff40ca6e94e45b1ddd26a8dcdc349 is 50, key is test_row_0/C:col10/1732778582704/Put/seqid=0 2024-11-28T07:23:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742316_1492 (size=12301) 2024-11-28T07:23:04,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b51ff40ca6e94e45b1ddd26a8dcdc349 2024-11-28T07:23:04,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/9475d2e8644f4bb9b25a8b1141d4bd8a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a 2024-11-28T07:23:04,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a, entries=150, sequenceid=330, filesize=30.5 K 2024-11-28T07:23:04,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0a8c561baf8143fe8a09aae374ccd419 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419 2024-11-28T07:23:04,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T07:23:04,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:04,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b51ff40ca6e94e45b1ddd26a8dcdc349 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349 2024-11-28T07:23:04,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:04,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T07:23:04,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 692ms, sequenceid=330, compaction requested=false 2024-11-28T07:23:04,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:04,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:04,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:04,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:04,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287aef260858f244038e3eb83e5c81aba3_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778644677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:04,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
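The RegionTooBusyException entries in this stretch come from HRegion.checkResources rejecting writes while the memstore of 92fcea6fc878b4b1c7f03e0a8e3d3d00 is over its blocking limit (logged as "Over memstore limit=512.0 K"); the AcidGuaranteesTestTool writers treat this as retryable, which is why RpcRetryingCallerImpl keeps reporting "tries=7, retries=16" a few entries below. The sketch that follows shows, from the client side, roughly where those retries happen and which client settings bound them; the property names are standard HBase configuration keys, but the values are illustrative and are not taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget and base pause used by the client's retrying caller (illustrative values).
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put() retries RegionTooBusyException internally; only if the region
      // stays over its memstore limit for the whole retry budget does it surface here.
      table.put(put);
    }
  }
}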
2024-11-28T07:23:04,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742317_1493 (size=14994) 2024-11-28T07:23:04,693 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:04,697 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287aef260858f244038e3eb83e5c81aba3_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287aef260858f244038e3eb83e5c81aba3_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:04,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778644685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,699 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/1dc7257fc66f4abdac1b129363643f41, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56870 deadline: 1732778644687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/1dc7257fc66f4abdac1b129363643f41 is 175, key is test_row_0/A:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,700 DEBUG [Thread-1784 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8207 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:04,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56922 deadline: 1732778644710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,720 DEBUG [Thread-1786 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8225 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:04,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742318_1494 (size=39949) 2024-11-28T07:23:04,737 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/1dc7257fc66f4abdac1b129363643f41 2024-11-28T07:23:04,749 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/2caf495d2a40400dabd72be647985213 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213 2024-11-28T07:23:04,754 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 2caf495d2a40400dabd72be647985213(size=31.2 K), total size for store is 61.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:04,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:04,754 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=12, startTime=1732778583775; duration=0sec 2024-11-28T07:23:04,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:04,754 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:04,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/30d7ceeff537428e8af1ef219e4b7ab0 is 50, key is test_row_0/B:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56894 deadline: 1732778644766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,771 DEBUG [Thread-1780 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8278 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:04,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778644790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742319_1495 (size=12301) 2024-11-28T07:23:04,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/30d7ceeff537428e8af1ef219e4b7ab0 2024-11-28T07:23:04,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:04,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778644800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:04,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a2a95ed938914862aa60655cf8052421 is 50, key is test_row_0/C:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,841 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:04,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:04,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742320_1496 (size=12301) 2024-11-28T07:23:04,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a2a95ed938914862aa60655cf8052421 2024-11-28T07:23:04,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/1dc7257fc66f4abdac1b129363643f41 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41 2024-11-28T07:23:04,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41, entries=200, sequenceid=353, filesize=39.0 K 2024-11-28T07:23:04,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/30d7ceeff537428e8af1ef219e4b7ab0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0 2024-11-28T07:23:04,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0, entries=150, sequenceid=353, filesize=12.0 K 2024-11-28T07:23:04,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a2a95ed938914862aa60655cf8052421 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421 2024-11-28T07:23:04,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421, entries=150, sequenceid=353, filesize=12.0 K 2024-11-28T07:23:04,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 265ms, sequenceid=353, compaction requested=true 2024-11-28T07:23:04,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T07:23:04,880 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:04,880 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:04,881 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:04,881 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:04,882 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:04,882 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/67efd88f290744fc84186963d2ff465e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.7 K 2024-11-28T07:23:04,882 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 67efd88f290744fc84186963d2ff465e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581476 2024-11-28T07:23:04,882 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:04,882 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:04,882 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,882 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=100.8 K 2024-11-28T07:23:04,882 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,882 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41] 2024-11-28T07:23:04,883 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b51ff40ca6e94e45b1ddd26a8dcdc349, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778582694 2024-11-28T07:23:04,883 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2caf495d2a40400dabd72be647985213, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581476 2024-11-28T07:23:04,884 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a2a95ed938914862aa60655cf8052421, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:04,884 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9475d2e8644f4bb9b25a8b1141d4bd8a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778582694 2024-11-28T07:23:04,884 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dc7257fc66f4abdac1b129363643f41, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:04,902 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:04,902 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a8069d0035b54126b51c69e54004843a is 50, key is test_row_0/C:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,908 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,928 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128576379243d2743ce95e86328c029fc96_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,930 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128576379243d2743ce95e86328c029fc96_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,930 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128576379243d2743ce95e86328c029fc96_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:04,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742321_1497 (size=13119) 2024-11-28T07:23:04,961 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a8069d0035b54126b51c69e54004843a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a8069d0035b54126b51c69e54004843a 2024-11-28T07:23:04,966 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into a8069d0035b54126b51c69e54004843a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
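The mob.DefaultMobStoreCompactor and mob.DefaultMobStoreFlusher entries above (including the temporary MOB writer that is aborted because the rewrite produced no MOB cells) appear because column family A of TestAcidGuarantees is MOB-enabled, so its flushes and compactions go through the MOB-aware code path. For orientation only, a table with such a family could be declared roughly as in the sketch below; the 100 KB MOB threshold and the plain settings for families B and C are assumptions, not values taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: creating a table whose 'A' family stores large values as MOB
// files, which is what routes its flushes and compactions through
// DefaultMobStoreFlusher / DefaultMobStoreCompactor. The 100 KB threshold is
// an assumption; cluster settings come from hbase-site.xml on the classpath.
public class MobTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)          // values above the threshold go to MOB files
                .setMobThreshold(100 * 1024)  // assumed threshold in bytes
                .build());
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
            admin.createTable(table.build());
        }
    }
}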
2024-11-28T07:23:04,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:04,966 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778584880; duration=0sec 2024-11-28T07:23:04,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:04,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:04,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:04,968 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:04,968 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:04,968 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,969 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1972fa74062449daa5ecf6ea887c3dbe, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.7 K 2024-11-28T07:23:04,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1972fa74062449daa5ecf6ea887c3dbe, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1732778581476 2024-11-28T07:23:04,969 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a8c561baf8143fe8a09aae374ccd419, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778582694 2024-11-28T07:23:04,970 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 30d7ceeff537428e8af1ef219e4b7ab0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 
is added to blk_1073742322_1498 (size=4469) 2024-11-28T07:23:04,986 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#419 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:04,986 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3e9718af33044b78b82f9a81c4943922 is 175, key is test_row_0/A:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:04,994 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:04,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-11-28T07:23:04,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:04,995 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:04,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:05,002 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#420 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:05,002 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/a9268685327a40c4a6264e555c834161 is 50, key is test_row_0/B:col10/1732778583989/Put/seqid=0 2024-11-28T07:23:05,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:05,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:05,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742323_1499 (size=32073) 2024-11-28T07:23:05,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288fbf45e991714bd29255b240114429b4_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778584675/Put/seqid=0 2024-11-28T07:23:05,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742324_1500 (size=13119) 2024-11-28T07:23:05,053 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/a9268685327a40c4a6264e555c834161 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/a9268685327a40c4a6264e555c834161 2024-11-28T07:23:05,060 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into a9268685327a40c4a6264e555c834161(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
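The "Exploring compaction algorithm has selected 3 files ... after considering 1 permutations with 1 in ratio" entries come from ExploringCompactionPolicy, which walks contiguous windows of eligible store files and keeps a window that satisfies the size-ratio constraint (no file much larger than the rest of the window combined), favouring more files and a smaller total rewrite. The following is only a simplified illustration of that selection idea, not the HBase implementation; the ratio and the min/max file counts are assumed values.

import java.util.ArrayList;
import java.util.List;

// Simplified illustration of ratio-based compaction file selection.
// This is NOT the real ExploringCompactionPolicy; the fixed ratio and the
// min/max file counts here are assumptions made for the sketch.
public class RatioSelectionSketch {
    static final double RATIO = 1.2;   // assumed size ratio
    static final int MIN_FILES = 3;    // assumed minimum files per compaction
    static final int MAX_FILES = 10;   // assumed maximum files per compaction

    // candidateSizes: store file sizes ordered oldest first (by sequence id).
    static List<Integer> select(long[] candidateSizes) {
        List<Integer> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < candidateSizes.length; start++) {
            for (int end = start + MIN_FILES - 1;
                 end < candidateSizes.length && end - start + 1 <= MAX_FILES; end++) {
                if (!withinRatio(candidateSizes, start, end)) {
                    continue;
                }
                long total = 0;
                for (int i = start; i <= end; i++) total += candidateSizes[i];
                int count = end - start + 1;
                // Prefer more files; break ties by smaller total rewrite cost.
                if (count > best.size() || (count == best.size() && total < bestSize)) {
                    best.clear();
                    for (int i = start; i <= end; i++) best.add(i);
                    bestSize = total;
                }
            }
        }
        return best; // indices of files to compact; empty if nothing qualifies
    }

    // Every file must be no larger than RATIO * (sum of the other files in the window).
    static boolean withinRatio(long[] sizes, int start, int end) {
        long total = 0;
        for (int i = start; i <= end; i++) total += sizes[i];
        for (int i = start; i <= end; i++) {
            if (sizes[i] > (total - sizes[i]) * RATIO) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the C-family file sizes logged above (12.7 K, 12.0 K, 12.0 K).
        System.out.println(select(new long[] {13004, 12288, 12288}));
    }
}

Run on the three C-family sizes logged above, the sketch selects all three files, which matches the minor compaction that was actually executed.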
2024-11-28T07:23:05,060 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:05,060 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778584880; duration=0sec 2024-11-28T07:23:05,060 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:05,060 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:05,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742325_1501 (size=12454) 2024-11-28T07:23:05,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778645105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778645107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778645218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778645222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:05,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778645426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778645430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,437 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3e9718af33044b78b82f9a81c4943922 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922 2024-11-28T07:23:05,449 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 3e9718af33044b78b82f9a81c4943922(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
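The repeated RegionTooBusyException warnings in this stretch mean the region's memstore has exceeded the limit logged as "Over memstore limit=512.0 K", so incoming mutations are rejected until the in-flight flushes and compactions drain it; the HBase client already retries these calls on its own. An application that additionally wants a coarse backoff of its own around a put against this table might look roughly like the sketch below; the attempt count, sleep times and the example cell are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

// Hedged sketch: application-level backoff around a put when the server keeps
// rejecting writes (e.g. RegionTooBusyException still surfacing after the
// client's own retries). Attempt count and sleep times are assumptions.
public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long sleepMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                      // write accepted
                } catch (IOException e) {
                    if (attempt == 5) throw e;  // give up after the last attempt
                    Thread.sleep(sleepMs);      // simple exponential backoff
                    sleepMs *= 2;
                }
            }
        }
    }
}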
2024-11-28T07:23:05,449 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:05,449 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778584880; duration=0sec 2024-11-28T07:23:05,449 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:05,450 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:05,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:05,492 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288fbf45e991714bd29255b240114429b4_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288fbf45e991714bd29255b240114429b4_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:05,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/75c7333c6b0648dfa4f86ba53a0d2b13, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:05,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/75c7333c6b0648dfa4f86ba53a0d2b13 is 175, key is test_row_0/A:col10/1732778584675/Put/seqid=0 2024-11-28T07:23:05,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742326_1502 (size=31255) 2024-11-28T07:23:05,525 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=370, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/75c7333c6b0648dfa4f86ba53a0d2b13 2024-11-28T07:23:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/7b785a3fb79d4c968b8ff644d0258714 is 50, key is test_row_0/B:col10/1732778584675/Put/seqid=0 2024-11-28T07:23:05,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742327_1503 (size=12301) 2024-11-28T07:23:05,591 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/7b785a3fb79d4c968b8ff644d0258714 2024-11-28T07:23:05,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/880fbfbcd5004bfc80d3d741a7798991 is 50, key is test_row_0/C:col10/1732778584675/Put/seqid=0 2024-11-28T07:23:05,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742328_1504 (size=12301) 2024-11-28T07:23:05,648 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/880fbfbcd5004bfc80d3d741a7798991 2024-11-28T07:23:05,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/75c7333c6b0648dfa4f86ba53a0d2b13 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13 2024-11-28T07:23:05,659 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13, entries=150, sequenceid=370, filesize=30.5 K 2024-11-28T07:23:05,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/7b785a3fb79d4c968b8ff644d0258714 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714 2024-11-28T07:23:05,667 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714, entries=150, sequenceid=370, filesize=12.0 K 2024-11-28T07:23:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/880fbfbcd5004bfc80d3d741a7798991 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991 2024-11-28T07:23:05,674 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991, entries=150, sequenceid=370, filesize=12.0 K 2024-11-28T07:23:05,675 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 680ms, sequenceid=370, compaction requested=false 2024-11-28T07:23:05,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:05,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
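The flush that just completed here was driven from the master: FlushTableProcedure pid=124 fanned out FlushRegionProcedure pid=125 to the region server, and their completion is reported in the lines that follow. From a client, the same table-wide flush can be requested through the Admin API, roughly as in this sketch (connection settings are assumed to come from an hbase-site.xml on the classpath).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch: asking the master to flush a table, which is what drives the
// FlushTableProcedure / FlushRegionProcedure pair seen in this log.
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits the flush request; the memstore flushes themselves run
            // on the region servers hosting the table's regions.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}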
2024-11-28T07:23:05,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-11-28T07:23:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-11-28T07:23:05,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-28T07:23:05,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5360 sec 2024-11-28T07:23:05,681 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.5420 sec 2024-11-28T07:23:05,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:23:05,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:05,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:05,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:05,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:05,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:05,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:05,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287ba52cdbf3744a28bfc7adfeb3c4aaaf_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:05,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778645791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778645803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742329_1505 (size=14994) 2024-11-28T07:23:05,861 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:05,865 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287ba52cdbf3744a28bfc7adfeb3c4aaaf_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287ba52cdbf3744a28bfc7adfeb3c4aaaf_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:05,866 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d2b8b405c85b4715992a6924401aef51, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:05,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d2b8b405c85b4715992a6924401aef51 is 175, key is test_row_0/A:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:05,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742330_1506 (size=39949) 2024-11-28T07:23:05,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778645905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:05,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778645918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:06,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778646116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:06,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:06,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778646136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:06,308 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d2b8b405c85b4715992a6924401aef51 2024-11-28T07:23:06,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5465f03705de4777a36794853c561766 is 50, key is test_row_0/B:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:06,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742331_1507 (size=12301) 2024-11-28T07:23:06,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:06,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778646428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:06,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:06,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778646450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:06,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5465f03705de4777a36794853c561766 2024-11-28T07:23:06,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e3d702bd80e04ec7bb30d9945261613b is 50, key is test_row_0/C:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:06,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742332_1508 (size=12301) 2024-11-28T07:23:06,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e3d702bd80e04ec7bb30d9945261613b 2024-11-28T07:23:06,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d2b8b405c85b4715992a6924401aef51 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51 2024-11-28T07:23:06,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51, entries=200, sequenceid=395, filesize=39.0 K 2024-11-28T07:23:06,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/5465f03705de4777a36794853c561766 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766 2024-11-28T07:23:06,855 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766, entries=150, sequenceid=395, filesize=12.0 K 2024-11-28T07:23:06,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e3d702bd80e04ec7bb30d9945261613b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b 2024-11-28T07:23:06,861 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b, entries=150, sequenceid=395, filesize=12.0 K 2024-11-28T07:23:06,862 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1120ms, sequenceid=395, compaction requested=true 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:06,862 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:06,862 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:06,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:06,867 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:06,867 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 
92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:06,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:06,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=100.9 K 2024-11-28T07:23:06,868 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:06,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51] 2024-11-28T07:23:06,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:06,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:06,868 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
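The repeated RegionTooBusyException warnings above are raised by HRegion#checkResources, which rejects puts while the region's memstore size is over its blocking limit. That limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the "Over memstore limit=512.0 K" figure is consistent with a deliberately small, test-sized flush threshold, though the exact values used by this test are not visible in this excerpt. A minimal sketch of how the limit falls out of the configuration, assuming illustrative values of a 128 KB flush size and the default 4x multiplier (the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-sized values: 128 KB flush threshold, default 4x multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit =
            conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // While the region's memstore stays above this limit, writes fail fast
        // with RegionTooBusyException until the pending flush frees space.
        System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }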
2024-11-28T07:23:06,868 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/a9268685327a40c4a6264e555c834161, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.8 K 2024-11-28T07:23:06,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a9268685327a40c4a6264e555c834161, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:06,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e9718af33044b78b82f9a81c4943922, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:06,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b785a3fb79d4c968b8ff644d0258714, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778584674 2024-11-28T07:23:06,870 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5465f03705de4777a36794853c561766, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:06,870 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75c7333c6b0648dfa4f86ba53a0d2b13, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778584674 2024-11-28T07:23:06,878 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2b8b405c85b4715992a6924401aef51, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:06,899 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:06,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/2cc127e704404049ae1d164ef83d481a is 50, key is test_row_0/B:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:06,908 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:06,910 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411282b536520fed84d229aeaecdf2367e04a_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:06,912 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411282b536520fed84d229aeaecdf2367e04a_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:06,913 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282b536520fed84d229aeaecdf2367e04a_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:06,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742333_1509 (size=13221) 2024-11-28T07:23:06,937 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/2cc127e704404049ae1d164ef83d481a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/2cc127e704404049ae1d164ef83d481a 2024-11-28T07:23:06,942 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 2cc127e704404049ae1d164ef83d481a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
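The minor compactions in this stretch are system-requested by the flusher ("Small Compaction requested: system; Because: MemStoreFlusher.0") and selected by ExploringCompactionPolicy. For comparison, an explicit client-side request for the same store would go through the Admin API as in the sketch below; the connection bootstrap and the choice of family A are illustrative assumptions, not something this excerpt shows the test doing.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompactionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queue a minor compaction of column family A of TestAcidGuarantees,
          // analogous to the system-requested compaction logged above.
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
          // admin.majorCompact(...) would instead force all store files into one.
        }
      }
    }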
2024-11-28T07:23:06,942 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:06,942 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778586862; duration=0sec 2024-11-28T07:23:06,942 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:06,942 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:06,942 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:06,944 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:06,944 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:06,945 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:06,945 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a8069d0035b54126b51c69e54004843a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.8 K 2024-11-28T07:23:06,945 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a8069d0035b54126b51c69e54004843a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732778583975 2024-11-28T07:23:06,946 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 880fbfbcd5004bfc80d3d741a7798991, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778584674 2024-11-28T07:23:06,946 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e3d702bd80e04ec7bb30d9945261613b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush 
requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:06,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:06,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:06,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742334_1510 (size=4469) 2024-11-28T07:23:06,950 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#428 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:06,951 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8203751f41554a0797ab20bafdd764eb is 175, key is test_row_0/A:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:06,976 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:06,977 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b4c46bf95fce442c8190a6bba0d42369 is 50, key is test_row_0/C:col10/1732778585098/Put/seqid=0 2024-11-28T07:23:06,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128deec5d1c03544ad0b7ff6dff93df9a0c_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778585790/Put/seqid=0 2024-11-28T07:23:07,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742335_1511 (size=32175) 2024-11-28T07:23:07,013 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/8203751f41554a0797ab20bafdd764eb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb 2024-11-28T07:23:07,022 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 8203751f41554a0797ab20bafdd764eb(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
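Store A is flushed and compacted through the MOB code path above (DefaultMobStoreFlusher, DefaultMobStoreCompactor, and the MOB writer aborted "because there are no MOB cells"), which implies the family was created MOB-enabled with a threshold larger than these small test cells. A sketch of such a column family definition, assuming an illustrative 100 KB MOB threshold (the actual descriptor used by the test is not shown here):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) {
        // Cells above the threshold are written as MOB files under /mobdir;
        // smaller cells (like the 50 to 175 byte cells reported in this log)
        // stay in ordinary HFiles, which is why the MOB writer was aborted.
        ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024L)
            .build();
        System.out.println(cfA);
      }
    }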
2024-11-28T07:23:07,022 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:07,022 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778586862; duration=0sec 2024-11-28T07:23:07,023 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:07,023 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:07,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742336_1512 (size=13221) 2024-11-28T07:23:07,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742337_1513 (size=14994) 2024-11-28T07:23:07,035 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:07,039 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/b4c46bf95fce442c8190a6bba0d42369 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b4c46bf95fce442c8190a6bba0d42369 2024-11-28T07:23:07,043 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128deec5d1c03544ad0b7ff6dff93df9a0c_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128deec5d1c03544ad0b7ff6dff93df9a0c_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:07,050 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d85def69251448e19721051df585c2dd, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:07,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d85def69251448e19721051df585c2dd is 175, key is test_row_0/A:col10/1732778585790/Put/seqid=0 2024-11-28T07:23:07,054 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into b4c46bf95fce442c8190a6bba0d42369(size=12.9 K), total 
size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:07,054 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:07,054 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778586862; duration=0sec 2024-11-28T07:23:07,054 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:07,054 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:07,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778647069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778647079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742338_1514 (size=39949) 2024-11-28T07:23:07,091 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=410, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d85def69251448e19721051df585c2dd 2024-11-28T07:23:07,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/010ffef1fcbf4696be6735543bf01550 is 50, key is test_row_0/B:col10/1732778585790/Put/seqid=0 2024-11-28T07:23:07,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742339_1515 (size=12301) 2024-11-28T07:23:07,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778647184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778647188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-28T07:23:07,255 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-28T07:23:07,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:07,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-28T07:23:07,259 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:07,260 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:07,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:07,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:07,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:07,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778647393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778647393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:07,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:07,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/010ffef1fcbf4696be6735543bf01550 2024-11-28T07:23:07,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:07,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:07,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:07,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
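The flush attempts failing with "Unable to complete flush ... as already flushing" are the region-server side of the master's FlushTableProcedure (pid=126/127) colliding with the MemStoreFlusher's own in-progress flush; the master marks the remote procedure failed and redispatches it until the region is free. The earlier "Operation: FLUSH ... procId: 124 completed" line suggests the test driver issues these through Admin#flush and waits on the returned table future; a minimal sketch of that call, with the connection setup assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers a FlushTableProcedure like pid=126 above and returns once
          // it completes, even if individual FlushRegionCallable attempts are
          // retried because the region is already flushing.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }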
2024-11-28T07:23:07,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a85b52d8b00946e782c4c2efe23fd36a is 50, key is test_row_0/C:col10/1732778585790/Put/seqid=0 2024-11-28T07:23:07,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742340_1516 (size=12301) 2024-11-28T07:23:07,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a85b52d8b00946e782c4c2efe23fd36a 2024-11-28T07:23:07,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d85def69251448e19721051df585c2dd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd 2024-11-28T07:23:07,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd, entries=200, sequenceid=410, filesize=39.0 K 2024-11-28T07:23:07,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/010ffef1fcbf4696be6735543bf01550 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550 2024-11-28T07:23:07,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550, entries=150, sequenceid=410, filesize=12.0 K 2024-11-28T07:23:07,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a85b52d8b00946e782c4c2efe23fd36a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a 2024-11-28T07:23:07,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a, entries=150, sequenceid=410, filesize=12.0 K 
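The "Flushed memstore ... to=.tmp/...", "Committing .tmp/... as ...", and "Added ..., entries=..., filesize=..." triples above are the normal flush commit sequence: each store writes its snapshot to a temporary HFile and then moves it into the column-family directory. One way to confirm the committed files is to list that directory over HDFS; the sketch below does so with the plain Hadoop FileSystem API, reusing the NameNode address and paths from this particular run (they will differ in any other environment).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListFlushedFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address copied from this test run's log output.
    conf.set("fs.defaultFS", "hdfs://localhost:44329");
    try (FileSystem fs = FileSystem.get(conf)) {
      Path familyA = new Path("/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e"
          + "/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A");
      // After the commit step, the flushed HFile (d85def69251448e19721051df585c2dd above)
      // appears here alongside earlier store files for family A.
      for (FileStatus status : fs.listStatus(familyA)) {
        System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
      }
    }
  }
}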
2024-11-28T07:23:07,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 748ms, sequenceid=410, compaction requested=false 2024-11-28T07:23:07,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:07,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:07,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:07,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:07,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:07,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128faaa4063b8f94431bb721dbb067c24f9_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:07,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742341_1517 (size=14994) 2024-11-28T07:23:07,778 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:07,788 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128faaa4063b8f94431bb721dbb067c24f9_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128faaa4063b8f94431bb721dbb067c24f9_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:07,790 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3a1c78173f44441a94784b0ba5e2586d, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:07,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3a1c78173f44441a94784b0ba5e2586d is 175, key is test_row_0/A:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:07,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778647781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778647790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742342_1518 (size=39949) 2024-11-28T07:23:07,825 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3a1c78173f44441a94784b0ba5e2586d 2024-11-28T07:23:07,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d18b0e31813d43fb8a6d60efd27c942e is 50, key is test_row_0/B:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:07,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:07,873 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:07,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
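Two details of this table's layout are visible in the flusher classes above: family A is handled by DefaultMobStoreFlusher/HMobStore (a MOB-enabled family whose mob files are renamed into mobdir), and all three stores log CompactingMemStore "Swapping pipeline suffix" messages, which means in-memory compaction is on. The exact descriptor used by this TestAcidGuarantees run is not shown in the log, so the sketch below only illustrates how such a schema could be declared; the MOB threshold and the BASIC compaction policy are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestSchema {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptorBuilder cf =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // In-memory compaction is what produces the CompactingMemStore
                // pipeline messages seen in the log.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
        if ("A".equals(family)) {
          // Assumed values: family A goes through DefaultMobStoreFlusher in this log,
          // which requires MOB; the 100-byte threshold is purely illustrative.
          cf.setMobEnabled(true).setMobThreshold(100L);
        }
        table.setColumnFamily(cf.build());
      }
      admin.createTable(table.build());
    }
  }
}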
2024-11-28T07:23:07,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:07,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742343_1519 (size=12301) 2024-11-28T07:23:07,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778647892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:07,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:07,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778647906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:08,025 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:08,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:08,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778648100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:08,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:08,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778648119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:08,179 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
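While the region's pending memstore data stays above its blocking limit, each incoming Mutate is rejected with RegionTooBusyException, which is what the CallRunner records above show being returned to the client. The standard HBase client retries these internally, but a caller can also back off explicitly. The sketch below is only illustrative: the table, row, family, and qualifier names are taken from the log, and the retry budget is arbitrary.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;            // accepted once the flush has freed memstore space
        } catch (IOException e) {
          // Depending on client retry settings this surfaces either as the raw
          // RegionTooBusyException or wrapped in a RetriesExhaustedException.
          if (attempt >= 5) {
            throw e;        // give up after an arbitrary retry budget
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff
        }
      }
    }
  }
}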
2024-11-28T07:23:08,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:08,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d18b0e31813d43fb8a6d60efd27c942e 2024-11-28T07:23:08,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/88933b2c64734f5cb75234f6c662838a is 50, key is test_row_0/C:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:08,335 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742344_1520 (size=12301) 2024-11-28T07:23:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:08,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778648413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:08,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:08,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778648437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:08,488 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
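The "Over memstore limit=512.0 K" figure reported by HRegion.checkResources is the per-region blocking limit: the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier). The values actually used by this test are not visible in this excerpt; the numbers below are assumptions chosen only so the product matches the 512 K seen in the log (128 KB x 4).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 KB flush size with the default multiplier of 4 gives the
    // 512 K blocking limit in this log. Production clusters use a much larger flush
    // size (128 MB by default), so writes rarely hit this limit outside of tests.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("Writes block above " + blockingLimit + " bytes per region");
  }
}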
2024-11-28T07:23:08,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:08,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:08,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/88933b2c64734f5cb75234f6c662838a 2024-11-28T07:23:08,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/3a1c78173f44441a94784b0ba5e2586d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d 2024-11-28T07:23:08,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d, entries=200, sequenceid=435, filesize=39.0 K 2024-11-28T07:23:08,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/d18b0e31813d43fb8a6d60efd27c942e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e 2024-11-28T07:23:08,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e, entries=150, sequenceid=435, filesize=12.0 K 2024-11-28T07:23:08,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/88933b2c64734f5cb75234f6c662838a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a 2024-11-28T07:23:08,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a, entries=150, sequenceid=435, filesize=12.0 K 2024-11-28T07:23:08,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1059ms, sequenceid=435, compaction requested=true 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:08,768 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:08,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:08,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:08,769 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:08,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112073 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:08,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:08,770 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,770 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=109.4 K 2024-11-28T07:23:08,770 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,770 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d] 2024-11-28T07:23:08,770 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:08,770 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:08,770 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,770 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/2cc127e704404049ae1d164ef83d481a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.9 K 2024-11-28T07:23:08,771 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cc127e704404049ae1d164ef83d481a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:08,771 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8203751f41554a0797ab20bafdd764eb, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:08,771 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 010ffef1fcbf4696be6735543bf01550, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732778585790 2024-11-28T07:23:08,771 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d85def69251448e19721051df585c2dd, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732778585777 2024-11-28T07:23:08,772 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d18b0e31813d43fb8a6d60efd27c942e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778587066 2024-11-28T07:23:08,772 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 
3a1c78173f44441a94784b0ba5e2586d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778586981 2024-11-28T07:23:08,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:08,796 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:08,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-28T07:23:08,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,797 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:08,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:08,804 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:08,805 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/76220c7208cd4add85bb540e7d1dbcac is 50, key is test_row_0/B:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:08,824 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128aa27960fde494a7aae07ea6839020434_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:08,826 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128aa27960fde494a7aae07ea6839020434_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:08,826 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128aa27960fde494a7aae07ea6839020434_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:08,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b734b784e96f45a2a556eaa0fba57ceb_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778587778/Put/seqid=0 2024-11-28T07:23:08,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742345_1521 (size=13323) 2024-11-28T07:23:08,889 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/76220c7208cd4add85bb540e7d1dbcac as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/76220c7208cd4add85bb540e7d1dbcac 2024-11-28T07:23:08,894 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 76220c7208cd4add85bb540e7d1dbcac(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:08,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:08,894 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778588768; duration=0sec 2024-11-28T07:23:08,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:08,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:08,894 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:08,902 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:08,902 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:08,902 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:08,902 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b4c46bf95fce442c8190a6bba0d42369, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=36.9 K 2024-11-28T07:23:08,905 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4c46bf95fce442c8190a6bba0d42369, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1732778585053 2024-11-28T07:23:08,909 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a85b52d8b00946e782c4c2efe23fd36a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732778585790 2024-11-28T07:23:08,909 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88933b2c64734f5cb75234f6c662838a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778587066 2024-11-28T07:23:08,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35055 is added to blk_1073742347_1523 (size=12454) 2024-11-28T07:23:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:08,945 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b734b784e96f45a2a556eaa0fba57ceb_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b734b784e96f45a2a556eaa0fba57ceb_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:08,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/b9ebe2cbc82f4663a7fa54451d00c57f, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:08,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/b9ebe2cbc82f4663a7fa54451d00c57f is 175, key is test_row_0/A:col10/1732778587778/Put/seqid=0 2024-11-28T07:23:08,949 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:08,950 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e23a5b1f4baa44ba9b463d3451eca4cf is 50, key is test_row_0/C:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:08,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:08,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:08,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742346_1522 (size=4469) 2024-11-28T07:23:08,966 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#436 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:08,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/5e3d01406f9349cb9453d763e795e323 is 175, key is test_row_0/A:col10/1732778587066/Put/seqid=0 2024-11-28T07:23:09,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742348_1524 (size=31255) 2024-11-28T07:23:09,009 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=448, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/b9ebe2cbc82f4663a7fa54451d00c57f 2024-11-28T07:23:09,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742349_1525 (size=13323) 2024-11-28T07:23:09,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742350_1526 (size=32277) 2024-11-28T07:23:09,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e8e4f59235514955910d16176691d310 is 50, key is test_row_0/B:col10/1732778587778/Put/seqid=0 2024-11-28T07:23:09,050 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/5e3d01406f9349cb9453d763e795e323 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323 2024-11-28T07:23:09,055 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 5e3d01406f9349cb9453d763e795e323(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:09,055 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:09,055 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778588768; duration=0sec 2024-11-28T07:23:09,055 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:09,056 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:09,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742351_1527 (size=12301) 2024-11-28T07:23:09,085 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e8e4f59235514955910d16176691d310 2024-11-28T07:23:09,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/01e53ca7be554bfdb0e1bbba286b9263 is 50, key is test_row_0/C:col10/1732778587778/Put/seqid=0 2024-11-28T07:23:09,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742352_1528 (size=12301) 2024-11-28T07:23:09,153 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/01e53ca7be554bfdb0e1bbba286b9263 2024-11-28T07:23:09,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778649145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778649151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/b9ebe2cbc82f4663a7fa54451d00c57f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f 2024-11-28T07:23:09,166 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f, entries=150, sequenceid=448, filesize=30.5 K 2024-11-28T07:23:09,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/e8e4f59235514955910d16176691d310 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310 2024-11-28T07:23:09,171 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310, entries=150, sequenceid=448, filesize=12.0 K 2024-11-28T07:23:09,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/01e53ca7be554bfdb0e1bbba286b9263 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263 2024-11-28T07:23:09,192 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263, entries=150, sequenceid=448, filesize=12.0 K 2024-11-28T07:23:09,194 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 397ms, sequenceid=448, compaction requested=false 2024-11-28T07:23:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-28T07:23:09,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-28T07:23:09,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-28T07:23:09,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9350 sec 2024-11-28T07:23:09,201 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.9430 sec 2024-11-28T07:23:09,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:09,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:09,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280868c5f7a5cd4effa4dd0b0d91ba4b69_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is 
test_row_0/A:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:09,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 307 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778649308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778649319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742353_1529 (size=14994) 2024-11-28T07:23:09,336 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:09,341 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280868c5f7a5cd4effa4dd0b0d91ba4b69_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280868c5f7a5cd4effa4dd0b0d91ba4b69_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:09,349 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c85200818f1453a99e214f946ff0b1c, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:09,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c85200818f1453a99e214f946ff0b1c is 175, key is test_row_0/A:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:09,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T07:23:09,367 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-28T07:23:09,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:09,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-28T07:23:09,370 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:09,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:09,371 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:09,371 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:09,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742354_1530 (size=39949) 2024-11-28T07:23:09,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 309 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778649421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,431 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/e23a5b1f4baa44ba9b463d3451eca4cf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e23a5b1f4baa44ba9b463d3451eca4cf 2024-11-28T07:23:09,435 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into e23a5b1f4baa44ba9b463d3451eca4cf(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:09,435 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:09,435 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778588768; duration=0sec 2024-11-28T07:23:09,435 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:09,435 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:09,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778649431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,471 DEBUG [Thread-1799 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:56318 2024-11-28T07:23:09,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:09,471 DEBUG [Thread-1799 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:09,472 DEBUG [Thread-1791 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x319559be to 127.0.0.1:56318 2024-11-28T07:23:09,472 DEBUG [Thread-1791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:09,476 DEBUG [Thread-1795 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:56318 2024-11-28T07:23:09,476 DEBUG [Thread-1795 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:09,478 DEBUG [Thread-1793 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:56318 2024-11-28T07:23:09,478 DEBUG [Thread-1793 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:09,479 DEBUG [Thread-1797 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:56318 2024-11-28T07:23:09,479 DEBUG [Thread-1797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:09,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:09,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:09,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:09,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 311 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778649630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778649640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:09,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:09,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:09,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:09,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,796 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=475, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c85200818f1453a99e214f946ff0b1c 2024-11-28T07:23:09,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/77d5e492502d4b8c9dbceecf01f30e2b is 50, key is test_row_0/B:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:09,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:09,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:09,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:09,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:09,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742355_1531 (size=12301) 2024-11-28T07:23:09,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 313 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56866 deadline: 1732778649932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:09,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:56924 deadline: 1732778649943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:09,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:09,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:09,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:09,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:10,137 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:10,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. as already flushing 2024-11-28T07:23:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:10,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:10,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:10,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/77d5e492502d4b8c9dbceecf01f30e2b 2024-11-28T07:23:10,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/603b18bb5e7d493d8b5c1ae23130d79b is 50, key is test_row_0/C:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:10,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742356_1532 (size=12301) 2024-11-28T07:23:10,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=475 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/603b18bb5e7d493d8b5c1ae23130d79b 2024-11-28T07:23:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/4c85200818f1453a99e214f946ff0b1c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c 2024-11-28T07:23:10,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c, entries=200, sequenceid=475, filesize=39.0 K 2024-11-28T07:23:10,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/77d5e492502d4b8c9dbceecf01f30e2b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b 2024-11-28T07:23:10,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b, entries=150, sequenceid=475, filesize=12.0 K 2024-11-28T07:23:10,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/603b18bb5e7d493d8b5c1ae23130d79b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b 2024-11-28T07:23:10,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b, entries=150, sequenceid=475, filesize=12.0 K 2024-11-28T07:23:10,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 990ms, sequenceid=475, compaction requested=true 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:10,260 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:10,260 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 92fcea6fc878b4b1c7f03e0a8e3d3d00:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:10,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:10,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/A is initiating minor compaction (all files) 2024-11-28T07:23:10,262 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/A in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:10,262 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=101.1 K 2024-11-28T07:23:10,262 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:10,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c] 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e3d01406f9349cb9453d763e795e323, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778587066 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/B is initiating minor compaction (all files) 2024-11-28T07:23:10,263 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/B in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:10,263 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/76220c7208cd4add85bb540e7d1dbcac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=37.0 K 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9ebe2cbc82f4663a7fa54451d00c57f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732778587773 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 76220c7208cd4add85bb540e7d1dbcac, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778587066 2024-11-28T07:23:10,263 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c85200818f1453a99e214f946ff0b1c, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732778589101 2024-11-28T07:23:10,264 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e8e4f59235514955910d16176691d310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732778587773 2024-11-28T07:23:10,264 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d5e492502d4b8c9dbceecf01f30e2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732778589101 2024-11-28T07:23:10,286 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#B#compaction#445 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:10,287 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0d6cad0772ee4010a891838ce254f3ea is 50, key is test_row_0/B:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:10,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:10,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:10,291 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:10,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:10,298 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:10,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742357_1533 (size=13425) 2024-11-28T07:23:10,337 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411288fa5b53d2809487dbe758285561167b1_92fcea6fc878b4b1c7f03e0a8e3d3d00 store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:10,342 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/0d6cad0772ee4010a891838ce254f3ea as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0d6cad0772ee4010a891838ce254f3ea 2024-11-28T07:23:10,349 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411288fa5b53d2809487dbe758285561167b1_92fcea6fc878b4b1c7f03e0a8e3d3d00, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:10,349 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288fa5b53d2809487dbe758285561167b1_92fcea6fc878b4b1c7f03e0a8e3d3d00 because there are no 
MOB cells, store=[table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:10,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ee6817b18064546b9077d7897abc8b6_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778589301/Put/seqid=0 2024-11-28T07:23:10,354 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/B of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 0d6cad0772ee4010a891838ce254f3ea(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:10,354 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:10,354 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/B, priority=13, startTime=1732778590260; duration=0sec 2024-11-28T07:23:10,355 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:10,355 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:B 2024-11-28T07:23:10,355 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:10,356 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:10,356 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 92fcea6fc878b4b1c7f03e0a8e3d3d00/C is initiating minor compaction (all files) 2024-11-28T07:23:10,356 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 92fcea6fc878b4b1c7f03e0a8e3d3d00/C in TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:10,356 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e23a5b1f4baa44ba9b463d3451eca4cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp, totalSize=37.0 K 2024-11-28T07:23:10,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e23a5b1f4baa44ba9b463d3451eca4cf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732778587066 2024-11-28T07:23:10,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 01e53ca7be554bfdb0e1bbba286b9263, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732778587773 2024-11-28T07:23:10,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 603b18bb5e7d493d8b5c1ae23130d79b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=475, earliestPutTs=1732778589101 2024-11-28T07:23:10,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742358_1534 (size=12454) 2024-11-28T07:23:10,376 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#C#compaction#448 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:10,376 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/7ca59f99ae824b2fbf077b478415cac6 is 50, key is test_row_0/C:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:10,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742359_1535 (size=4469) 2024-11-28T07:23:10,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742360_1536 (size=13425) 2024-11-28T07:23:10,420 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/7ca59f99ae824b2fbf077b478415cac6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/7ca59f99ae824b2fbf077b478415cac6 2024-11-28T07:23:10,432 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/C of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 7ca59f99ae824b2fbf077b478415cac6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:10,432 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:10,432 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/C, priority=13, startTime=1732778590260; duration=0sec 2024-11-28T07:23:10,433 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:10,433 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:C 2024-11-28T07:23:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:10,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
as already flushing 2024-11-28T07:23:10,439 DEBUG [Thread-1788 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:56318 2024-11-28T07:23:10,439 DEBUG [Thread-1788 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:10,447 DEBUG [Thread-1782 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:56318 2024-11-28T07:23:10,448 DEBUG [Thread-1782 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:10,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:10,785 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ee6817b18064546b9077d7897abc8b6_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ee6817b18064546b9077d7897abc8b6_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:10,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/50da106dbba7452fa878a9239f6c79a1, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:10,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/50da106dbba7452fa878a9239f6c79a1 is 175, key is test_row_0/A:col10/1732778589301/Put/seqid=0 2024-11-28T07:23:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742361_1537 (size=31255) 2024-11-28T07:23:10,803 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 92fcea6fc878b4b1c7f03e0a8e3d3d00#A#compaction#446 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:10,803 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/41ae9f2a690c4e0581803dc48277b53c is 175, key is test_row_0/A:col10/1732778589101/Put/seqid=0 2024-11-28T07:23:10,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742362_1538 (size=32379) 2024-11-28T07:23:11,196 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=487, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/50da106dbba7452fa878a9239f6c79a1 2024-11-28T07:23:11,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/6bdace686f7f4e5c8d3445d493d60a27 is 50, key is test_row_0/B:col10/1732778589301/Put/seqid=0 2024-11-28T07:23:11,218 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/41ae9f2a690c4e0581803dc48277b53c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/41ae9f2a690c4e0581803dc48277b53c 2024-11-28T07:23:11,224 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 92fcea6fc878b4b1c7f03e0a8e3d3d00/A of 92fcea6fc878b4b1c7f03e0a8e3d3d00 into 41ae9f2a690c4e0581803dc48277b53c(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:11,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:11,224 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00., storeName=92fcea6fc878b4b1c7f03e0a8e3d3d00/A, priority=13, startTime=1732778590260; duration=0sec 2024-11-28T07:23:11,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:11,224 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 92fcea6fc878b4b1c7f03e0a8e3d3d00:A 2024-11-28T07:23:11,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742363_1539 (size=12301) 2024-11-28T07:23:11,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:11,655 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/6bdace686f7f4e5c8d3445d493d60a27 2024-11-28T07:23:11,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a7acbbb10a3c44b7b85cad203e623674 is 50, key is test_row_0/C:col10/1732778589301/Put/seqid=0 2024-11-28T07:23:11,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742364_1540 (size=12301) 2024-11-28T07:23:12,064 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a7acbbb10a3c44b7b85cad203e623674 2024-11-28T07:23:12,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/50da106dbba7452fa878a9239f6c79a1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/50da106dbba7452fa878a9239f6c79a1 2024-11-28T07:23:12,070 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/50da106dbba7452fa878a9239f6c79a1, entries=150, sequenceid=487, filesize=30.5 K 2024-11-28T07:23:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/6bdace686f7f4e5c8d3445d493d60a27 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/6bdace686f7f4e5c8d3445d493d60a27 2024-11-28T07:23:12,073 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/6bdace686f7f4e5c8d3445d493d60a27, entries=150, sequenceid=487, filesize=12.0 K 2024-11-28T07:23:12,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/a7acbbb10a3c44b7b85cad203e623674 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a7acbbb10a3c44b7b85cad203e623674 2024-11-28T07:23:12,076 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a7acbbb10a3c44b7b85cad203e623674, entries=150, sequenceid=487, filesize=12.0 K 2024-11-28T07:23:12,077 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=13.42 KB/13740 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1786ms, sequenceid=487, compaction requested=false 2024-11-28T07:23:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:12,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-28T07:23:12,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-28T07:23:12,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-28T07:23:12,079 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7070 sec 2024-11-28T07:23:12,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.7110 sec 2024-11-28T07:23:12,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T07:23:13,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-28T07:23:13,475 INFO [Thread-1790 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-28T07:23:14,787 DEBUG [Thread-1786 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:56318 2024-11-28T07:23:14,787 DEBUG [Thread-1786 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:14,801 DEBUG [Thread-1784 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:56318 2024-11-28T07:23:14,801 DEBUG [Thread-1784 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:14,843 DEBUG [Thread-1780 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:56318 2024-11-28T07:23:14,843 DEBUG [Thread-1780 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 141
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 162
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1321
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3963 rows
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1308
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3924 rows
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1323
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3967 rows
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1311
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3933 rows
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1329
2024-11-28T07:23:14,844 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3987 rows
2024-11-28T07:23:14,844 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-28T07:23:14,844 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0bf5e2f0 to 127.0.0.1:56318
2024-11-28T07:23:14,844 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T07:23:14,846 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-28T07:23:14,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-28T07:23:14,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-28T07:23:14,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130
2024-11-28T07:23:14,850 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778594849"}]},"ts":"1732778594849"}
2024-11-28T07:23:14,851 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-28T07:23:14,853 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-28T07:23:14,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-28T07:23:14,855 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, UNASSIGN}] 2024-11-28T07:23:14,855 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, UNASSIGN 2024-11-28T07:23:14,856 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:14,856 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:23:14,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:14,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T07:23:15,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:15,008 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:15,008 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:23:15,008 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 92fcea6fc878b4b1c7f03e0a8e3d3d00, disabling compactions & flushes 2024-11-28T07:23:15,008 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:15,008 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 2024-11-28T07:23:15,008 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. after waiting 0 ms 2024-11-28T07:23:15,008 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:15,008 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(2837): Flushing 92fcea6fc878b4b1c7f03e0a8e3d3d00 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=A 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=B 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 92fcea6fc878b4b1c7f03e0a8e3d3d00, store=C 2024-11-28T07:23:15,009 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:15,013 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ed0bb409eaba47c1ad08061993453795_92fcea6fc878b4b1c7f03e0a8e3d3d00 is 50, key is test_row_0/A:col10/1732778594800/Put/seqid=0 2024-11-28T07:23:15,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742365_1541 (size=9914) 2024-11-28T07:23:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T07:23:15,417 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:15,420 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ed0bb409eaba47c1ad08061993453795_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed0bb409eaba47c1ad08061993453795_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:15,421 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d5036b574bbe47e4a8df5c2d4bdcac00, store: [table=TestAcidGuarantees family=A region=92fcea6fc878b4b1c7f03e0a8e3d3d00] 2024-11-28T07:23:15,422 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d5036b574bbe47e4a8df5c2d4bdcac00 is 175, key is test_row_0/A:col10/1732778594800/Put/seqid=0 2024-11-28T07:23:15,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742366_1542 (size=22561) 2024-11-28T07:23:15,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T07:23:15,825 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=498, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d5036b574bbe47e4a8df5c2d4bdcac00 2024-11-28T07:23:15,832 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/8d36c3edf2e44a17a6485fb220f1d7ee is 50, key is test_row_0/B:col10/1732778594800/Put/seqid=0 2024-11-28T07:23:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742367_1543 (size=9857) 2024-11-28T07:23:15,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T07:23:16,235 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/8d36c3edf2e44a17a6485fb220f1d7ee 2024-11-28T07:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/75efa33508cf4537b53712a13930e9fe is 50, key is test_row_0/C:col10/1732778594800/Put/seqid=0 2024-11-28T07:23:16,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742368_1544 (size=9857) 2024-11-28T07:23:16,308 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c] to archive 2024-11-28T07:23:16,309 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T07:23:16,310 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/18ee6aab1c8142bfb9d2f0a9ac488283 2024-11-28T07:23:16,311 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/61d0478abd3b490893175f211f3a72f8 2024-11-28T07:23:16,312 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/078cd58f6fb644faad79887780cf4395 2024-11-28T07:23:16,313 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/37b20a35ef5341bcad84ac08c37e9813 2024-11-28T07:23:16,314 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/185daed9bdea4dc6906df0332ea97908 2024-11-28T07:23:16,315 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d22273e218114309b51923a0374185b7 2024-11-28T07:23:16,316 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/549c1e18603d4d1185007db3df4e0cd1 2024-11-28T07:23:16,317 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4b269ff1f923457bb6e8ba6baebdd19e 2024-11-28T07:23:16,318 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/fc5850c309304d1fb33f81f8471ccac8 2024-11-28T07:23:16,319 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/e02959ff9816405e881e05bc2840136f 2024-11-28T07:23:16,320 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/97f223726ad8436ab7a7aea8531e6216 2024-11-28T07:23:16,321 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8787b47d0aed467fb4f6c7b17fc82ca5 2024-11-28T07:23:16,322 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2dd988d49c644a6db835bf56f4ee6b23 2024-11-28T07:23:16,322 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/31aba582bbbf4d97a6e5cfd730d91eb3 2024-11-28T07:23:16,323 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/6382666a52b24daa9dfe7ddec2894c4b 2024-11-28T07:23:16,324 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/51521b374a1240b991c6ac2e05bd553a 2024-11-28T07:23:16,325 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7c038f6003e6405096206ad73c7a3682 2024-11-28T07:23:16,326 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8485331e35d44ad581be838914312f72 2024-11-28T07:23:16,327 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/7da066bb9fd84ee2b25bac726083e94e 2024-11-28T07:23:16,328 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c254e8d90834debbcc765709f6122a7 2024-11-28T07:23:16,329 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d1b3905de4034b8d84f146d7e2612640 2024-11-28T07:23:16,330 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/2caf495d2a40400dabd72be647985213 2024-11-28T07:23:16,331 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/9475d2e8644f4bb9b25a8b1141d4bd8a 2024-11-28T07:23:16,332 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/1dc7257fc66f4abdac1b129363643f41 2024-11-28T07:23:16,333 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3e9718af33044b78b82f9a81c4943922 2024-11-28T07:23:16,334 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/75c7333c6b0648dfa4f86ba53a0d2b13 2024-11-28T07:23:16,335 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d2b8b405c85b4715992a6924401aef51 2024-11-28T07:23:16,336 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/8203751f41554a0797ab20bafdd764eb 2024-11-28T07:23:16,336 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d85def69251448e19721051df585c2dd 2024-11-28T07:23:16,337 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/3a1c78173f44441a94784b0ba5e2586d 2024-11-28T07:23:16,338 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/5e3d01406f9349cb9453d763e795e323 2024-11-28T07:23:16,339 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/b9ebe2cbc82f4663a7fa54451d00c57f 2024-11-28T07:23:16,340 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/4c85200818f1453a99e214f946ff0b1c 2024-11-28T07:23:16,344 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7d15c14ebdc84fb8ab4a925be27a8136, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1200a41591e641769e97ae7ed188a1a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/282523c803de4decac59d963b769cb3c, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/029a63fb351e40bda495cd6f6e08c1ca, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/cba77e6548404d5ab8c3d9cf62fe0f8f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1972fa74062449daa5ecf6ea887c3dbe, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/a9268685327a40c4a6264e555c834161, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/2cc127e704404049ae1d164ef83d481a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/76220c7208cd4add85bb540e7d1dbcac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b] to archive 2024-11-28T07:23:16,345 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:23:16,346 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3fcda4e155bf4b7883c6cd43c8a959fd 2024-11-28T07:23:16,346 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0cc65cdba1de4b4d84092bb7b41eb6e6 2024-11-28T07:23:16,347 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/96a9b08d91aa43eaa6b8c65bdf860bb2 2024-11-28T07:23:16,348 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7d15c14ebdc84fb8ab4a925be27a8136 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7d15c14ebdc84fb8ab4a925be27a8136 2024-11-28T07:23:16,349 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/23731effc2e0478b844a537688d0974d 2024-11-28T07:23:16,350 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/9ff7f40e45d446c6876b34d2be0556cb 2024-11-28T07:23:16,351 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1200a41591e641769e97ae7ed188a1a5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1200a41591e641769e97ae7ed188a1a5 2024-11-28T07:23:16,352 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e3fe6557aae44c46a766d9981d5a7132 2024-11-28T07:23:16,352 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/204a6e0146974d2ab2305140da7e0b1b 2024-11-28T07:23:16,353 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/282523c803de4decac59d963b769cb3c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/282523c803de4decac59d963b769cb3c 2024-11-28T07:23:16,354 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0308c4a2f2c04541bf897ff2281d8ace 2024-11-28T07:23:16,355 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d9e65b3c242f45af95591c0a23d97dd8 2024-11-28T07:23:16,356 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/029a63fb351e40bda495cd6f6e08c1ca to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/029a63fb351e40bda495cd6f6e08c1ca 2024-11-28T07:23:16,357 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b31416096664478dbcb414bc3f1cdc9c 2024-11-28T07:23:16,358 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/b89b175c192247c4a4cfcc7fed19f40b 2024-11-28T07:23:16,359 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/22a28b4df90e456eb16c9f891a69dbc1 2024-11-28T07:23:16,360 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/cba77e6548404d5ab8c3d9cf62fe0f8f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/cba77e6548404d5ab8c3d9cf62fe0f8f 2024-11-28T07:23:16,361 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/13d76b9e95f747d7addf53264d4db3ba 2024-11-28T07:23:16,362 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/c120dc586ce14cdd9c728477104a132f 2024-11-28T07:23:16,363 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5d1bb377af394d7093bf2ee41013347e 2024-11-28T07:23:16,364 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1972fa74062449daa5ecf6ea887c3dbe to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/1972fa74062449daa5ecf6ea887c3dbe 2024-11-28T07:23:16,365 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/3347c07dacde47df93a66dc55257e67e 2024-11-28T07:23:16,365 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0a8c561baf8143fe8a09aae374ccd419 2024-11-28T07:23:16,366 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/a9268685327a40c4a6264e555c834161 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/a9268685327a40c4a6264e555c834161 2024-11-28T07:23:16,367 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/30d7ceeff537428e8af1ef219e4b7ab0 2024-11-28T07:23:16,368 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/7b785a3fb79d4c968b8ff644d0258714 2024-11-28T07:23:16,369 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/2cc127e704404049ae1d164ef83d481a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/2cc127e704404049ae1d164ef83d481a 2024-11-28T07:23:16,370 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/5465f03705de4777a36794853c561766 2024-11-28T07:23:16,371 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/010ffef1fcbf4696be6735543bf01550 2024-11-28T07:23:16,372 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/76220c7208cd4add85bb540e7d1dbcac to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/76220c7208cd4add85bb540e7d1dbcac 2024-11-28T07:23:16,373 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/d18b0e31813d43fb8a6d60efd27c942e 2024-11-28T07:23:16,374 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/e8e4f59235514955910d16176691d310 2024-11-28T07:23:16,375 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/77d5e492502d4b8c9dbceecf01f30e2b 2024-11-28T07:23:16,380 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f680b2e931544ee7bde9e73bc2a8f3eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e6ff40602c2e4743b4cb9c86c1a8749f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f690960bc2f44f43b08adfc8e792c2e2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b93f9cc4e5b249de826cf6efccd2f43a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/4acec8093e6a45f9a4739dae2404dfb0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/67efd88f290744fc84186963d2ff465e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a8069d0035b54126b51c69e54004843a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b4c46bf95fce442c8190a6bba0d42369, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e23a5b1f4baa44ba9b463d3451eca4cf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b] to archive 2024-11-28T07:23:16,381 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T07:23:16,382 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/847dca02e5364844818fdbf6c8cba976 2024-11-28T07:23:16,383 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e093231cbb074d05a9c0bace2071ed4c 2024-11-28T07:23:16,384 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/6961a6b043a24a58a7853094444c61cb 2024-11-28T07:23:16,385 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f680b2e931544ee7bde9e73bc2a8f3eb to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f680b2e931544ee7bde9e73bc2a8f3eb 2024-11-28T07:23:16,386 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/eb6534af51c64002aa2b386f7229a08c 2024-11-28T07:23:16,387 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/be5e60a51cd24357a7490ee5e747f419 2024-11-28T07:23:16,388 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e6ff40602c2e4743b4cb9c86c1a8749f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e6ff40602c2e4743b4cb9c86c1a8749f 2024-11-28T07:23:16,388 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b87fbbb61ae54c3bbedec1ba9e821ae3 2024-11-28T07:23:16,389 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/2f2b0843cf724ef1956bcda1f064710a 2024-11-28T07:23:16,390 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f690960bc2f44f43b08adfc8e792c2e2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f690960bc2f44f43b08adfc8e792c2e2 2024-11-28T07:23:16,391 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/306f49505a08410cb23195a42ee7d86c 2024-11-28T07:23:16,391 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/55ba571c27e3463f9eaa9f91686ef3dd 2024-11-28T07:23:16,392 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b93f9cc4e5b249de826cf6efccd2f43a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b93f9cc4e5b249de826cf6efccd2f43a 2024-11-28T07:23:16,393 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/17d691049f90413cb0beb4ca0e47f116 2024-11-28T07:23:16,394 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/5829f32b1b4b43729d7e4b08de536eba 2024-11-28T07:23:16,395 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e2d78920b5a04beca995489d1b85960a 2024-11-28T07:23:16,395 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/4acec8093e6a45f9a4739dae2404dfb0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/4acec8093e6a45f9a4739dae2404dfb0 2024-11-28T07:23:16,396 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/f03f63780e1d4db09904765e03152d32 2024-11-28T07:23:16,397 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/1d05da20c84341eea932df9d8ac77f59 2024-11-28T07:23:16,398 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/48b466a6a0704d93abded583a7fd76f6 2024-11-28T07:23:16,399 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/67efd88f290744fc84186963d2ff465e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/67efd88f290744fc84186963d2ff465e 2024-11-28T07:23:16,400 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/3fd6553a46164e0b8280359d415b3a7e 2024-11-28T07:23:16,401 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b51ff40ca6e94e45b1ddd26a8dcdc349 2024-11-28T07:23:16,402 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a8069d0035b54126b51c69e54004843a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a8069d0035b54126b51c69e54004843a 2024-11-28T07:23:16,403 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a2a95ed938914862aa60655cf8052421 2024-11-28T07:23:16,404 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/880fbfbcd5004bfc80d3d741a7798991 2024-11-28T07:23:16,405 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b4c46bf95fce442c8190a6bba0d42369 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/b4c46bf95fce442c8190a6bba0d42369 2024-11-28T07:23:16,406 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e3d702bd80e04ec7bb30d9945261613b 2024-11-28T07:23:16,406 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a85b52d8b00946e782c4c2efe23fd36a 2024-11-28T07:23:16,407 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e23a5b1f4baa44ba9b463d3451eca4cf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/e23a5b1f4baa44ba9b463d3451eca4cf 2024-11-28T07:23:16,408 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/88933b2c64734f5cb75234f6c662838a 2024-11-28T07:23:16,409 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/01e53ca7be554bfdb0e1bbba286b9263 2024-11-28T07:23:16,410 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/592d8b721726:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/603b18bb5e7d493d8b5c1ae23130d79b 2024-11-28T07:23:16,645 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=498 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/75efa33508cf4537b53712a13930e9fe 2024-11-28T07:23:16,648 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/A/d5036b574bbe47e4a8df5c2d4bdcac00 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d5036b574bbe47e4a8df5c2d4bdcac00 2024-11-28T07:23:16,650 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d5036b574bbe47e4a8df5c2d4bdcac00, entries=100, sequenceid=498, filesize=22.0 K 2024-11-28T07:23:16,651 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/B/8d36c3edf2e44a17a6485fb220f1d7ee as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/8d36c3edf2e44a17a6485fb220f1d7ee 2024-11-28T07:23:16,653 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/8d36c3edf2e44a17a6485fb220f1d7ee, entries=100, sequenceid=498, filesize=9.6 K 2024-11-28T07:23:16,654 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/.tmp/C/75efa33508cf4537b53712a13930e9fe as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/75efa33508cf4537b53712a13930e9fe 2024-11-28T07:23:16,656 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/75efa33508cf4537b53712a13930e9fe, entries=100, sequenceid=498, filesize=9.6 K 2024-11-28T07:23:16,656 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 92fcea6fc878b4b1c7f03e0a8e3d3d00 in 1648ms, sequenceid=498, compaction requested=true 2024-11-28T07:23:16,660 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits/501.seqid, newMaxSeqId=501, maxSeqId=4 2024-11-28T07:23:16,661 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00. 
2024-11-28T07:23:16,661 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 92fcea6fc878b4b1c7f03e0a8e3d3d00: 2024-11-28T07:23:16,662 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,663 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=92fcea6fc878b4b1c7f03e0a8e3d3d00, regionState=CLOSED 2024-11-28T07:23:16,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-28T07:23:16,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 92fcea6fc878b4b1c7f03e0a8e3d3d00, server=592d8b721726,33143,1732778474488 in 1.8070 sec 2024-11-28T07:23:16,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-11-28T07:23:16,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=92fcea6fc878b4b1c7f03e0a8e3d3d00, UNASSIGN in 1.8100 sec 2024-11-28T07:23:16,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-28T07:23:16,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8120 sec 2024-11-28T07:23:16,668 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778596668"}]},"ts":"1732778596668"} 2024-11-28T07:23:16,669 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:23:16,671 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:23:16,673 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8260 sec 2024-11-28T07:23:16,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T07:23:16,953 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-28T07:23:16,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:23:16,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:16,955 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:16,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T07:23:16,955 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=134, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:16,957 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,959 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits] 2024-11-28T07:23:16,962 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/41ae9f2a690c4e0581803dc48277b53c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/41ae9f2a690c4e0581803dc48277b53c 2024-11-28T07:23:16,963 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/50da106dbba7452fa878a9239f6c79a1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/50da106dbba7452fa878a9239f6c79a1 2024-11-28T07:23:16,964 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d5036b574bbe47e4a8df5c2d4bdcac00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/A/d5036b574bbe47e4a8df5c2d4bdcac00 2024-11-28T07:23:16,966 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0d6cad0772ee4010a891838ce254f3ea to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/0d6cad0772ee4010a891838ce254f3ea 2024-11-28T07:23:16,968 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/6bdace686f7f4e5c8d3445d493d60a27 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/6bdace686f7f4e5c8d3445d493d60a27 
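The DISABLE (procId 130) and DELETE (procId 134) operations recorded above are driven from the client side through the HBase Admin API; the repeated "Checking to see if procedure is done" entries are the client polling until the corresponding master procedure finishes. A minimal sketch of that sequence, assuming the stock HBase 2.x Java client rather than the test's own helper code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // A table must be disabled before it can be deleted. Both calls block
          // until the master-side DisableTableProcedure / DeleteTableProcedure
          // report completion, matching the pid=130 and pid=134 entries above.
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          admin.deleteTable(tn);
        }
      }
    }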
2024-11-28T07:23:16,969 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/8d36c3edf2e44a17a6485fb220f1d7ee to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/B/8d36c3edf2e44a17a6485fb220f1d7ee 2024-11-28T07:23:16,971 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/75efa33508cf4537b53712a13930e9fe to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/75efa33508cf4537b53712a13930e9fe 2024-11-28T07:23:16,973 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/7ca59f99ae824b2fbf077b478415cac6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/7ca59f99ae824b2fbf077b478415cac6 2024-11-28T07:23:16,974 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a7acbbb10a3c44b7b85cad203e623674 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/C/a7acbbb10a3c44b7b85cad203e623674 2024-11-28T07:23:16,976 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits/501.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00/recovered.edits/501.seqid 2024-11-28T07:23:16,977 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,977 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:23:16,977 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:23:16,978 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T07:23:16,981 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280752904ea4ff4d8a9e4b37257b356e6f_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280752904ea4ff4d8a9e4b37257b356e6f_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,982 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280868c5f7a5cd4effa4dd0b0d91ba4b69_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280868c5f7a5cd4effa4dd0b0d91ba4b69_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,983 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280bb948ca685d4b709dae6e15a1a8d019_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280bb948ca685d4b709dae6e15a1a8d019_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,984 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112810dd9c4cb1394617a83cf3fd6ca798c1_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112810dd9c4cb1394617a83cf3fd6ca798c1_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,986 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128119a7a5ee1c94c0e93c3b1b1f28502be_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128119a7a5ee1c94c0e93c3b1b1f28502be_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,987 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128181005c01a3c438f9f86f5c837a1b732_92fcea6fc878b4b1c7f03e0a8e3d3d00 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128181005c01a3c438f9f86f5c837a1b732_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,988 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112822fdc2bc7a704f0c98d45bb64bad22a0_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112822fdc2bc7a704f0c98d45bb64bad22a0_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,990 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128277925a4481e47928225971f0886b3d9_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128277925a4481e47928225971f0886b3d9_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,991 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284b5ad46f322e4841866571df0b5281ed_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284b5ad46f322e4841866571df0b5281ed_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,992 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e785c8d9dde4831a673cb7f0639f38c_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e785c8d9dde4831a673cb7f0639f38c_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,993 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861ddac9e63b441dbb8c018f5c109abcb_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861ddac9e63b441dbb8c018f5c109abcb_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,995 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286d4994d185994c1ea2840cf1e69036fc_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286d4994d185994c1ea2840cf1e69036fc_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,996 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287aef260858f244038e3eb83e5c81aba3_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287aef260858f244038e3eb83e5c81aba3_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,997 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287ba52cdbf3744a28bfc7adfeb3c4aaaf_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287ba52cdbf3744a28bfc7adfeb3c4aaaf_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:16,999 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886b77d2583774242b9778b7207cdab1b_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886b77d2583774242b9778b7207cdab1b_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,000 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288fbf45e991714bd29255b240114429b4_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288fbf45e991714bd29255b240114429b4_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,001 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128902e023643c64a58bfc5211842234e31_92fcea6fc878b4b1c7f03e0a8e3d3d00 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128902e023643c64a58bfc5211842234e31_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,003 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128910b819149664578bd70c5a7949c3fb0_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128910b819149664578bd70c5a7949c3fb0_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,004 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898116b9e27d24f79abeaab617098b560_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898116b9e27d24f79abeaab617098b560_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,005 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ee6817b18064546b9077d7897abc8b6_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ee6817b18064546b9077d7897abc8b6_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,006 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b734b784e96f45a2a556eaa0fba57ceb_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b734b784e96f45a2a556eaa0fba57ceb_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,008 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b9792ad773454eff8be202dae41fd0b7_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b9792ad773454eff8be202dae41fd0b7_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,009 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cec68c6c6e3c4f90bf8e084e9864b1af_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cec68c6c6e3c4f90bf8e084e9864b1af_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,010 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128deec5d1c03544ad0b7ff6dff93df9a0c_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128deec5d1c03544ad0b7ff6dff93df9a0c_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,012 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed0bb409eaba47c1ad08061993453795_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed0bb409eaba47c1ad08061993453795_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,013 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128faaa4063b8f94431bb721dbb067c24f9_92fcea6fc878b4b1c7f03e0a8e3d3d00 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128faaa4063b8f94431bb721dbb067c24f9_92fcea6fc878b4b1c7f03e0a8e3d3d00 2024-11-28T07:23:17,013 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:23:17,016 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=134, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:17,018 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:23:17,020 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:23:17,021 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=134, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:17,021 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
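The archiver entries above never delete store files in place: each file is moved out of the live layout under data/ (or mobdir/data/ for the MOB files) into a parallel tree under archive/, keeping namespace/table/region/family/file intact, and only then is the emptied region directory removed. A rough illustration of that path rewrite, not HBase's own HFileArchiver code:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Sketch of the data/ -> archive/ mapping visible in the log; rootDir is the
      // cluster root directory (hdfs://localhost:44329/user/jenkins/test-data/... in this run).
      static Path toArchive(Path rootDir, String namespace, String table,
                            String encodedRegion, String family, String fileName) {
        return new Path(rootDir, String.format("archive/data/%s/%s/%s/%s/%s",
            namespace, table, encodedRegion, family, fileName));
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e");
        // Mirrors the first archived file of family A above.
        System.out.println(toArchive(root, "default", "TestAcidGuarantees",
            "92fcea6fc878b4b1c7f03e0a8e3d3d00", "A", "41ae9f2a690c4e0581803dc48277b53c"));
      }
    }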
2024-11-28T07:23:17,021 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778597021"}]},"ts":"9223372036854775807"} 2024-11-28T07:23:17,023 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:23:17,023 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 92fcea6fc878b4b1c7f03e0a8e3d3d00, NAME => 'TestAcidGuarantees,,1732778566387.92fcea6fc878b4b1c7f03e0a8e3d3d00.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:23:17,024 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T07:23:17,024 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778597024"}]},"ts":"9223372036854775807"} 2024-11-28T07:23:17,025 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:23:17,028 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=134, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:17,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 75 msec 2024-11-28T07:23:17,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T07:23:17,056 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-28T07:23:17,069 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238 (was 241), OpenFileDescriptor=453 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=640 (was 595) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4380 (was 4545) 2024-11-28T07:23:17,079 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=640, ProcessCount=11, AvailableMemoryMB=4380 2024-11-28T07:23:17,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
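The TableDescriptorChecker warning at the end of this block fires because the effective memstore flush size for the new table, 131072 bytes (128 KB), is far below the 128 MB default, so flushes will be very frequent. The test's setup code is not part of this log; a hypothetical reconstruction of a configuration that would trigger the same warning:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallFlushSizeSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // 131072 bytes, the value quoted in the warning; the same check also considers a
        // descriptor-level MEMSTORE_FLUSHSIZE (TableDescriptorBuilder.setMemStoreFlushSize).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1));
      }
    }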
2024-11-28T07:23:17,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:23:17,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:17,083 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:23:17,083 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:17,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 135 2024-11-28T07:23:17,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:17,083 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:23:17,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742369_1545 (size=960) 2024-11-28T07:23:17,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:17,490 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:23:17,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742370_1546 (size=53) 2024-11-28T07:23:17,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:17,898 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:23:17,898 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e310f48e6ef0ed637c2d62fa297701bf, disabling compactions & flushes 2024-11-28T07:23:17,899 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:17,899 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:17,899 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. after waiting 0 ms 2024-11-28T07:23:17,899 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:17,899 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
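The descriptor dumped by the create request above amounts to three column families A, B and C with VERSIONS => '1' and otherwise default attributes, plus the table-level 'hbase.hregion.compacting.memstore.type' => 'BASIC' metadata. A sketch of an equivalent client call using the builder API (an assumption about how such a table would be created, not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the descriptor dump: BASIC in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String family : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1'; remaining attributes stay at their defaults
                .build());
          }
          // Blocks until CreateTableProcedure (pid=135 above) finishes and the region is opened.
          admin.createTable(tdb.build());
        }
      }
    }

The blocking createTable call is why the RPC handler keeps logging "Checking to see if procedure is done pid=135" until the procedure reports SUCCESS and the client marks the CREATE operation completed.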
2024-11-28T07:23:17,899 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:17,900 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:23:17,900 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778597900"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778597900"}]},"ts":"1732778597900"} 2024-11-28T07:23:17,901 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:23:17,901 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:23:17,902 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778597901"}]},"ts":"1732778597901"} 2024-11-28T07:23:17,902 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:23:17,906 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, ASSIGN}] 2024-11-28T07:23:17,907 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, ASSIGN 2024-11-28T07:23:17,907 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:23:18,058 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=e310f48e6ef0ed637c2d62fa297701bf, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:18,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; OpenRegionProcedure e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:18,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:18,210 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:18,213 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
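The ASSIGN above ends with the region being opened on 592d8b721726,33143,1732778474488. From the client side that placement can be observed through a RegionLocator once the table is online; a small sketch using the standard client API (not part of the test itself):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          // For this run the single region e310f48e6ef0ed637c2d62fa297701bf should
          // resolve to the server that the OpenRegionProcedure above targeted.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }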
2024-11-28T07:23:18,213 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7285): Opening region: {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:23:18,213 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,214 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:23:18,214 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7327): checking encryption for e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,214 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7330): checking classloading for e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,215 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,216 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:18,216 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e310f48e6ef0ed637c2d62fa297701bf columnFamilyName A 2024-11-28T07:23:18,216 DEBUG [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:18,217 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(327): Store=e310f48e6ef0ed637c2d62fa297701bf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:18,217 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,218 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:18,218 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e310f48e6ef0ed637c2d62fa297701bf columnFamilyName B 2024-11-28T07:23:18,218 DEBUG [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:18,218 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(327): Store=e310f48e6ef0ed637c2d62fa297701bf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:18,218 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,219 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:18,220 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e310f48e6ef0ed637c2d62fa297701bf columnFamilyName C 2024-11-28T07:23:18,220 DEBUG [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:18,220 INFO [StoreOpener-e310f48e6ef0ed637c2d62fa297701bf-1 {}] regionserver.HStore(327): Store=e310f48e6ef0ed637c2d62fa297701bf/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:18,220 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:18,221 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,221 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,222 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:23:18,224 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1085): writing seq id for e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:18,225 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:23:18,225 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1102): Opened e310f48e6ef0ed637c2d62fa297701bf; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65164762, jitterRate=-0.028969377279281616}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:23:18,226 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1001): Region open journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:18,227 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., pid=137, masterSystemTime=1732778598210 2024-11-28T07:23:18,228 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:18,228 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
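Each of the three stores above is instantiated as a CompactingMemStore with compactor=BASIC, which is the effect of the table-level compacting.memstore.type attribute in the descriptor. The same behaviour can also be requested per column family; a sketch of that alternative using the stock builder API (an illustration, not what this test does):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionSketch {
      public static void main(String[] args) {
        // Per-family request for the BASIC in-memory compaction policy that the
        // store-open entries above report (the in-memory flush threshold, pipeline
        // and cell-count limits then come from the region server configuration).
        ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
        System.out.println(cfd.getInMemoryCompaction()); // BASIC
      }
    }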
2024-11-28T07:23:18,228 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=e310f48e6ef0ed637c2d62fa297701bf, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:18,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-28T07:23:18,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; OpenRegionProcedure e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 in 170 msec 2024-11-28T07:23:18,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=136, resume processing ppid=135 2024-11-28T07:23:18,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, ppid=135, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, ASSIGN in 324 msec 2024-11-28T07:23:18,232 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:23:18,232 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778598232"}]},"ts":"1732778598232"} 2024-11-28T07:23:18,233 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:23:18,236 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:23:18,236 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-11-28T07:23:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-11-28T07:23:19,187 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 135 completed 2024-11-28T07:23:19,188 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-28T07:23:19,192 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,194 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,195 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,196 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:23:19,196 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37818, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:23:19,198 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-28T07:23:19,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,201 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-28T07:23:19,204 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,205 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-28T07:23:19,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,208 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-28T07:23:19,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,211 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-28T07:23:19,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,215 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-28T07:23:19,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,219 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-28T07:23:19,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,222 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-28T07:23:19,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,231 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-28T07:23:19,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,241 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-28T07:23:19,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:19,276 DEBUG [hconnection-0xe1825c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,278 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,282 DEBUG [hconnection-0x601bf63e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,283 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,286 DEBUG [hconnection-0x3572b8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,288 INFO [RS-EventLoopGroup-3-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,288 DEBUG [hconnection-0x79ca8a53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,290 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-28T07:23:19,298 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:19,299 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:19,299 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:19,300 DEBUG [hconnection-0x6d763350-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,301 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:19,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:19,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:19,322 DEBUG 
[hconnection-0x1e386e06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,324 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55658, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778659337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778659337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,356 DEBUG [hconnection-0x3728468b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,357 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55666, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d718fdef598f44458a4e97498bdeb8dc is 50, key is test_row_0/A:col10/1732778599303/Put/seqid=0 2024-11-28T07:23:19,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778659361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:19,402 DEBUG [hconnection-0xcad36d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,403 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,416 DEBUG [hconnection-0x5512c8e1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,419 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778659429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,430 DEBUG [hconnection-0x5eb11946-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:19,431 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:19,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778659432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778659439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778659439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:19,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:19,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:19,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778659463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742371_1547 (size=12001) 2024-11-28T07:23:19,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778659531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778659539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:19,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:19,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:19,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:19,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
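The recurring pid=139 failures above follow a single pattern: the regionserver's FlushRegionCallable finds the region already mid-flush (MemStoreFlusher began flushing e310f48e6ef0ed637c2d62fa297701bf at 07:23:19,306), declines to start a second flush ("NOT flushing ... as already flushing"), and reports an IOException back to the master, which logs "Remote procedure failed, pid=139" and re-dispatches the callable until the in-flight flush finishes. A minimal Java sketch of that guard-and-retry shape follows; FlushGuard, requestFlush and flushOrFail are hypothetical names, not HBase source.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

final class FlushGuard {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    // Returns true if this call ran the flush, false if another flush was already in progress.
    boolean requestFlush(Runnable writeSnapshotToHFile) {
        if (!flushing.compareAndSet(false, true)) {
            return false;                       // mirrors "NOT flushing ... as already flushing"
        }
        try {
            writeSnapshotToHFile.run();         // snapshot the memstore and write the store file
            return true;
        } finally {
            flushing.set(false);
        }
    }

    // Caller side, in the spirit of FlushRegionCallable: a refused flush surfaces as an
    // IOException, which the master treats as a retriable failure and re-dispatches.
    static void flushOrFail(FlushGuard guard, Runnable work) throws IOException {
        if (!guard.requestFlush(work)) {
            throw new IOException("Unable to complete flush: already flushing");
        }
    }
}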
2024-11-28T07:23:19,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778659642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778659644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778659665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778659733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778659750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,760 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:19,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:19,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:19,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
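The "Over memstore limit=512.0 K" rejections above come from the update-blocking check in HRegion.checkResources: puts are refused once the region's memstore exceeds the flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). A test-scale flush size of 128 KB with the default multiplier of 4 would give exactly 512 KB, but the values actually configured for this run are an assumption. The sketch below only illustrates how that threshold is derived from configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-scale values, chosen so that 128 KB * 4 = 512 KB as in the log above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // HBase default

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingSize = flushSize * multiplier;

        System.out.println("Puts are rejected with RegionTooBusyException once the region's"
            + " memstore exceeds " + (blockingSize / 1024) + " KB");
    }
}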
2024-11-28T07:23:19,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:19,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d718fdef598f44458a4e97498bdeb8dc 2024-11-28T07:23:19,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:19,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:19,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:19,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:19,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
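While the flush drains (the "Flushed memstore data size=20.13 KB at sequenceid=13" entry above is column family A being written to its temporary store file), writer threads keep receiving RegionTooBusyException. The exception is retriable and the stock client normally retries it internally up to hbase.client.retries.number, so the test's writers simply resubmit. The sketch below shows one way a standalone writer could back off on the busy signal; the table name, row key and column A:col10 follow the log, while the retry count and sleep are illustrative assumptions, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);               // the client also retries internally before giving up
                    break;
                } catch (RegionTooBusyException e) {
                    // Depending on retry settings the busy signal may instead arrive wrapped in a
                    // RetriesExhaustedException; this sketch handles the direct form for simplicity.
                    if (attempt >= 5) {
                        throw e;                  // give up after a few extra attempts
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff while the flush drains
                }
            }
        }
    }
}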
2024-11-28T07:23:19,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:19,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778659945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778659947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:19,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778659968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:19,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ca5d0ba68b664462ab7cebe966d601f5 is 50, key is test_row_0/B:col10/1732778599303/Put/seqid=0 2024-11-28T07:23:20,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742372_1548 (size=12001) 2024-11-28T07:23:20,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ca5d0ba68b664462ab7cebe966d601f5 2024-11-28T07:23:20,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778660044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778660052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e92917998fad4d088abc2dcdb2d9bca8 is 50, key is test_row_0/C:col10/1732778599303/Put/seqid=0 2024-11-28T07:23:20,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742373_1549 (size=12001) 2024-11-28T07:23:20,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
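The RegionTooBusyException warnings above come from HRegion.checkResources, which rejects writes while the region's memstore is over its blocking limit (512.0 K in this run) so that flushes can catch up; callers are expected to back off and retry. The stock HBase client already retries such rejections internally, so the explicit loop below is only a sketch of that behavior. The row, family, and qualifier are taken from the log; the cell value, the retry bounds, and how the exception surfaces to the caller are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" match the keys seen in the flush output above;
      // the cell value itself is illustrative.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // The busy signal may arrive directly or wrapped by the client's own retry layer,
          // so check the cause chain as well (a simplification for this sketch).
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt >= 10) {
            throw e; // not a busy-region rejection, or out of attempts
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L); // exponential backoff, capped at 5 s
        }
      }
    }
  }
}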
2024-11-28T07:23:20,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,373 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:20,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778660450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778660452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778660471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e92917998fad4d088abc2dcdb2d9bca8 2024-11-28T07:23:20,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
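A blocking limit of only 512.0 K, with each family flush carrying roughly 20 KB, is consistent with a test configuration that deliberately shrinks the per-region memstore flush size so that write pressure and flushing collide; the actual settings are not visible in this excerpt. In HBase the blocking threshold is the flush size multiplied by hbase.hregion.memstore.block.multiplier, and the flush itself writes each family's snapshot under the region's .tmp directory before committing it into the family directory (the HRegionFileSystem "Committing ... as ..." entries a few lines below). A hypothetical configuration that would yield the 512 K limit seen here (an assumed 128 K flush size times a multiplier of 4) might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each region's memstore once it holds 128 K of data (assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes once the memstore reaches flush.size * multiplier = 512 K,
    // matching the "Over memstore limit=512.0 K" threshold reported above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = "
        + conf.getLong("hbase.hregion.memstore.flush.size", 0)
          * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) / 1024 + " K");
  }
}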
2024-11-28T07:23:20,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d718fdef598f44458a4e97498bdeb8dc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc 2024-11-28T07:23:20,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc, entries=150, sequenceid=13, filesize=11.7 K 2024-11-28T07:23:20,537 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-28T07:23:20,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ca5d0ba68b664462ab7cebe966d601f5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5 2024-11-28T07:23:20,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5, entries=150, sequenceid=13, filesize=11.7 K 2024-11-28T07:23:20,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e92917998fad4d088abc2dcdb2d9bca8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8 2024-11-28T07:23:20,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8, entries=150, sequenceid=13, filesize=11.7 K 2024-11-28T07:23:20,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for e310f48e6ef0ed637c2d62fa297701bf in 1242ms, sequenceid=13, compaction requested=false 2024-11-28T07:23:20,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:20,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:20,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:20,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e977324341394668beaf9a73bfc2fdd7 is 50, key is test_row_0/A:col10/1732778600554/Put/seqid=0 2024-11-28T07:23:20,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778660581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778660582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742374_1550 (size=14341) 2024-11-28T07:23:20,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e977324341394668beaf9a73bfc2fdd7 2024-11-28T07:23:20,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a3c02d4f4b1f4ac49d67e406d7e206b5 is 50, key is test_row_0/B:col10/1732778600554/Put/seqid=0 2024-11-28T07:23:20,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,694 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742375_1551 (size=12001) 2024-11-28T07:23:20,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a3c02d4f4b1f4ac49d67e406d7e206b5 2024-11-28T07:23:20,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778660694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778660694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2bbfc000e8254be98fad63aabd6d5491 is 50, key is test_row_0/C:col10/1732778600554/Put/seqid=0 2024-11-28T07:23:20,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742376_1552 (size=12001) 2024-11-28T07:23:20,846 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:20,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:20,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:20,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:20,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778660905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:20,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:20,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778660908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:21,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:21,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2bbfc000e8254be98fad63aabd6d5491 2024-11-28T07:23:21,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e977324341394668beaf9a73bfc2fdd7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7 2024-11-28T07:23:21,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7, entries=200, sequenceid=39, filesize=14.0 K 2024-11-28T07:23:21,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a3c02d4f4b1f4ac49d67e406d7e206b5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5 2024-11-28T07:23:21,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T07:23:21,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2bbfc000e8254be98fad63aabd6d5491 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491 2024-11-28T07:23:21,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T07:23:21,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e310f48e6ef0ed637c2d62fa297701bf in 645ms, sequenceid=39, compaction requested=false 2024-11-28T07:23:21,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:21,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:21,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:21,237 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:21,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/b0a401a1a7db461786689486e1ee286d is 50, key is test_row_0/A:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:21,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742377_1553 (size=14337) 2024-11-28T07:23:21,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778661345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778661352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:21,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778661455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778661459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778661463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:21,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778661464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778661484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,620 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,621 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:21,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/b0a401a1a7db461786689486e1ee286d 2024-11-28T07:23:21,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778661666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:21,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778661668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:21,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9055bcab639d43f9b87eea227f55969a is 50, key is test_row_0/B:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742378_1554 (size=9657) 2024-11-28T07:23:21,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9055bcab639d43f9b87eea227f55969a 2024-11-28T07:23:21,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:21,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2f42b8d601744ec790399bdf3d3064f1 is 50, key is test_row_0/C:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:21,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742379_1555 (size=9657) 2024-11-28T07:23:21,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2f42b8d601744ec790399bdf3d3064f1 2024-11-28T07:23:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/b0a401a1a7db461786689486e1ee286d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d 2024-11-28T07:23:21,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d, entries=200, sequenceid=51, filesize=14.0 K 2024-11-28T07:23:21,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9055bcab639d43f9b87eea227f55969a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a 2024-11-28T07:23:21,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a, entries=100, sequenceid=51, filesize=9.4 K 2024-11-28T07:23:21,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/2f42b8d601744ec790399bdf3d3064f1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1 2024-11-28T07:23:21,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1, entries=100, sequenceid=51, filesize=9.4 K 2024-11-28T07:23:21,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e310f48e6ef0ed637c2d62fa297701bf in 627ms, sequenceid=51, compaction requested=true 2024-11-28T07:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:21,863 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:21,863 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:21,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:21,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:21,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:21,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:21,866 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40679 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:21,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:21,866 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:21,866 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,866 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=32.9 K 2024-11-28T07:23:21,867 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:21,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:21,867 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=39.7 K 2024-11-28T07:23:21,867 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ca5d0ba68b664462ab7cebe966d601f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732778599303 2024-11-28T07:23:21,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d718fdef598f44458a4e97498bdeb8dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732778599303 2024-11-28T07:23:21,868 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a3c02d4f4b1f4ac49d67e406d7e206b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732778599332 2024-11-28T07:23:21,868 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e977324341394668beaf9a73bfc2fdd7, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732778599332 2024-11-28T07:23:21,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9055bcab639d43f9b87eea227f55969a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778600581 2024-11-28T07:23:21,869 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0a401a1a7db461786689486e1ee286d, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778600567 2024-11-28T07:23:21,894 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#463 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:21,897 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/664d9b04452342dc8885e2bbf035979b is 50, key is test_row_0/B:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:21,897 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:21,898 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/ac3ef87817a140cebab675912479d3b5 is 50, key is test_row_0/A:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:21,929 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:21,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T07:23:21,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:21,929 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:21,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742380_1556 (size=12104) 2024-11-28T07:23:21,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742381_1557 (size=12104) 2024-11-28T07:23:21,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:21,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:21,986 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/ac3ef87817a140cebab675912479d3b5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/ac3ef87817a140cebab675912479d3b5 2024-11-28T07:23:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/906ebfe46f8a4c038fec60c6c688b81d is 50, key is test_row_0/A:col10/1732778601351/Put/seqid=0 2024-11-28T07:23:21,994 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into ac3ef87817a140cebab675912479d3b5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:21,994 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:21,994 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778601863; duration=0sec 2024-11-28T07:23:21,994 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:21,994 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:21,994 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:21,997 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:21,997 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:21,997 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:21,997 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=32.9 K 2024-11-28T07:23:21,998 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e92917998fad4d088abc2dcdb2d9bca8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732778599303 2024-11-28T07:23:21,999 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bbfc000e8254be98fad63aabd6d5491, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732778599332 2024-11-28T07:23:21,999 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f42b8d601744ec790399bdf3d3064f1, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778600581 2024-11-28T07:23:22,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778662018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,034 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:22,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778662025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,034 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1e4e5eb633454ed89199a25535de0082 is 50, key is test_row_0/C:col10/1732778600581/Put/seqid=0 2024-11-28T07:23:22,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742382_1558 (size=12001) 2024-11-28T07:23:22,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742383_1559 (size=12104) 2024-11-28T07:23:22,082 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1e4e5eb633454ed89199a25535de0082 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1e4e5eb633454ed89199a25535de0082 2024-11-28T07:23:22,088 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 1e4e5eb633454ed89199a25535de0082(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:22,088 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:22,088 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778601864; duration=0sec 2024-11-28T07:23:22,088 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:22,088 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:22,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778662127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778662137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,241 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:23:22,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778662335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778662345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/664d9b04452342dc8885e2bbf035979b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/664d9b04452342dc8885e2bbf035979b 2024-11-28T07:23:22,363 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 664d9b04452342dc8885e2bbf035979b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:22,363 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:22,363 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778601863; duration=0sec 2024-11-28T07:23:22,363 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:22,363 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:22,449 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/906ebfe46f8a4c038fec60c6c688b81d 2024-11-28T07:23:22,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/917a1269e14a4bb3bc5db0fcaac475b7 is 50, key is test_row_0/B:col10/1732778601351/Put/seqid=0 2024-11-28T07:23:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742384_1560 (size=12001) 2024-11-28T07:23:22,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778662646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778662655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:22,917 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/917a1269e14a4bb3bc5db0fcaac475b7 2024-11-28T07:23:22,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/faed1a3445db4e5db43a4dd5bf0ebf56 is 50, key is test_row_0/C:col10/1732778601351/Put/seqid=0 2024-11-28T07:23:22,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742385_1561 (size=12001) 2024-11-28T07:23:22,972 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/faed1a3445db4e5db43a4dd5bf0ebf56 2024-11-28T07:23:22,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/906ebfe46f8a4c038fec60c6c688b81d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d 2024-11-28T07:23:22,979 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T07:23:22,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/917a1269e14a4bb3bc5db0fcaac475b7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7 2024-11-28T07:23:22,988 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T07:23:22,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/faed1a3445db4e5db43a4dd5bf0ebf56 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56 2024-11-28T07:23:22,997 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T07:23:23,002 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e310f48e6ef0ed637c2d62fa297701bf in 1073ms, sequenceid=75, compaction requested=false 2024-11-28T07:23:23,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:23,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
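The flush-and-commit cycle above (and the RegionTooBusyException warnings around it) is driven by the TestAcidGuarantees writer threads repeatedly putting the same rows into the three column families A, B and C while the region is flushing. As a rough illustration only, here is a minimal Java sketch of such a writer against the standard HBase 2.x client API; the class name, the value contents and the connection setup (an hbase-site.xml for the test cluster on the classpath) are assumptions of the sketch, not details taken from the log above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidGuaranteesWriterSketch {
  public static void main(String[] args) throws Exception {
    // Assumes the test cluster's configuration (hbase-site.xml) is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One multi-family put, mirroring the A/B/C stores and the
      // test_row_0/<family>:col10 cells seen in the flush output above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value"); // illustrative value only
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      // While the region's memstore is over its blocking limit, this call fails with
      // RegionTooBusyException; the client's retrying caller backs off and retries,
      // as the RpcRetryingCallerImpl entries later in this log show.
      table.put(put);
    }
  }
}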
2024-11-28T07:23:23,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-28T07:23:23,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-28T07:23:23,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-28T07:23:23,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7050 sec 2024-11-28T07:23:23,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 3.7210 sec 2024-11-28T07:23:23,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:23,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:23,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/32c776427a2b48cd9d5c38b835845c68 is 50, key is test_row_0/A:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742386_1562 (size=14341) 2024-11-28T07:23:23,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/32c776427a2b48cd9d5c38b835845c68 2024-11-28T07:23:23,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/3449d53d057c44aaa4f1804ee19ca668 is 50, key is test_row_0/B:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to 
blk_1073742387_1563 (size=12001) 2024-11-28T07:23:23,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/3449d53d057c44aaa4f1804ee19ca668 2024-11-28T07:23:23,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/24a1164fee154546a0d297017b812d48 is 50, key is test_row_0/C:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778663254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778663256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742388_1564 (size=12001) 2024-11-28T07:23:23,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/24a1164fee154546a0d297017b812d48 2024-11-28T07:23:23,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/32c776427a2b48cd9d5c38b835845c68 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68 2024-11-28T07:23:23,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68, entries=200, sequenceid=91, filesize=14.0 K 2024-11-28T07:23:23,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/3449d53d057c44aaa4f1804ee19ca668 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668 2024-11-28T07:23:23,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T07:23:23,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/24a1164fee154546a0d297017b812d48 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48 2024-11-28T07:23:23,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T07:23:23,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e310f48e6ef0ed637c2d62fa297701bf in 160ms, sequenceid=91, compaction requested=true 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:23,322 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:23,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:23,323 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:23,323 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:23,323 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:23,323 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:23,323 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:23,324 INFO 
[RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,324 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,324 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/664d9b04452342dc8885e2bbf035979b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=35.3 K 2024-11-28T07:23:23,324 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/ac3ef87817a140cebab675912479d3b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.5 K 2024-11-28T07:23:23,324 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 664d9b04452342dc8885e2bbf035979b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778599332 2024-11-28T07:23:23,324 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ac3ef87817a140cebab675912479d3b5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778599332 2024-11-28T07:23:23,324 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 906ebfe46f8a4c038fec60c6c688b81d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732778601316 2024-11-28T07:23:23,324 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 917a1269e14a4bb3bc5db0fcaac475b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732778601316 2024-11-28T07:23:23,325 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 32c776427a2b48cd9d5c38b835845c68, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602004 2024-11-28T07:23:23,325 DEBUG 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3449d53d057c44aaa4f1804ee19ca668, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602015 2024-11-28T07:23:23,345 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#472 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:23,345 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/b4639a717b87410aae36970418f311ce is 50, key is test_row_0/A:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,357 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:23,358 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a46801e1820b45daaee63a8d826cda52 is 50, key is test_row_0/B:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:23,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:23,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742389_1565 (size=12207) 2024-11-28T07:23:23,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T07:23:23,407 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-28T07:23:23,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d788da05f63247c8905ef67419983e8c is 50, key is test_row_0/A:col10/1732778603255/Put/seqid=0 2024-11-28T07:23:23,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:23,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-28T07:23:23,417 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:23,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:23,417 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:23,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:23,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778663416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778663419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742390_1566 (size=12207) 2024-11-28T07:23:23,452 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a46801e1820b45daaee63a8d826cda52 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a46801e1820b45daaee63a8d826cda52 2024-11-28T07:23:23,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742391_1567 (size=14341) 2024-11-28T07:23:23,458 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into a46801e1820b45daaee63a8d826cda52(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
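The pid=138 and pid=140 procedures above come from explicit Admin-driven flushes of the table (the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 138 completed" entries), whereas the minor compactions of stores A, B and C were queued automatically by the memstore flusher. As a hedged sketch only, the following shows how such a flush, and optionally an explicit major compaction, would be requested through the HBase 2.x Admin API; the connection setup is assumed, and the majorCompact call is merely the manual alternative, not something the log above records.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    // Assumes the test cluster's configuration (hbase-site.xml) is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Schedules a master-side FlushTableProcedure (like pid=138/140 above), which
      // fans out FlushRegionProcedure subprocedures to the hosting region server.
      admin.flush(table);
      // Manual alternative to the system-requested minor compactions seen above.
      admin.majorCompact(table);
    }
  }
}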
2024-11-28T07:23:23,458 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:23,458 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778603322; duration=0sec 2024-11-28T07:23:23,459 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:23,459 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:23,459 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:23,460 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:23,460 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:23,460 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,460 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1e4e5eb633454ed89199a25535de0082, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=35.3 K 2024-11-28T07:23:23,461 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e4e5eb633454ed89199a25535de0082, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732778599332 2024-11-28T07:23:23,462 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting faed1a3445db4e5db43a4dd5bf0ebf56, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732778601316 2024-11-28T07:23:23,464 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a1164fee154546a0d297017b812d48, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602015 2024-11-28T07:23:23,478 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#475 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:23,478 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/eed3ec97cea74d37bd462264f8b5e3d1 is 50, key is test_row_0/C:col10/1732778602024/Put/seqid=0 2024-11-28T07:23:23,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778663475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,481 DEBUG [Thread-2396 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:23,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778663497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,502 DEBUG [Thread-2400 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:23,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742392_1568 (size=12207) 2024-11-28T07:23:23,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:23,519 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/eed3ec97cea74d37bd462264f8b5e3d1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/eed3ec97cea74d37bd462264f8b5e3d1 2024-11-28T07:23:23,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778663508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,522 DEBUG [Thread-2394 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:23,525 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into eed3ec97cea74d37bd462264f8b5e3d1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:23,525 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:23,525 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778603322; duration=0sec 2024-11-28T07:23:23,525 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:23,525 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:23,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778663525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778663532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:23,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T07:23:23,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:23,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:23,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:23,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:23,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:23,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T07:23:23,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:23,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:23,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778663738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:23,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778663738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:23,812 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/b4639a717b87410aae36970418f311ce as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b4639a717b87410aae36970418f311ce 2024-11-28T07:23:23,818 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into b4639a717b87410aae36970418f311ce(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
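The RegionTooBusyException warnings above show the region server rejecting Mutate calls while region e310f48e6ef0ed637c2d62fa297701bf's memstore is over its 512.0 K blocking limit, and the RpcRetryingCallerImpl entry shows the client retrying those rejections transparently (tries=6, retries=16). As a rough illustration only (no client code appears in this log), a writer against the same table could make that behaviour explicit; the configuration keys below are standard HBase client settings, but the chosen values and the class itself are assumptions, not taken from this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry budget; the log above shows the default caller retrying
        // ("tries=6, retries=16") while the region stays over its limit.
        conf.setInt("hbase.client.retries.number", 5);   // illustrative value
        conf.setLong("hbase.client.pause", 250L);        // ms between retries, illustrative
        // Server-side, the blocking threshold is hbase.hregion.memstore.flush.size
        // multiplied by hbase.hregion.memstore.block.multiplier; a 512.0 K limit
        // implies deliberately small values in this test's configuration.

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            try {
                table.put(put);   // retried internally by the HBase client
            } catch (IOException e) {
                // Once the retry budget is spent the client surfaces an IOException
                // (typically RetriesExhaustedException) describing the
                // RegionTooBusyException rejections seen in the log above.
                System.err.println("Region still too busy: " + e.getMessage());
            }
        }
    }
}

Once the in-flight flush and compactions drain the memstore back under the limit, the same put succeeds without intervention, which is consistent with the retry entries above.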
2024-11-28T07:23:23,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:23,818 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778603322; duration=0sec 2024-11-28T07:23:23,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:23,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:23,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d788da05f63247c8905ef67419983e8c 2024-11-28T07:23:23,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e6ce7e583ba049188dcce0f501be9c6e is 50, key is test_row_0/B:col10/1732778603255/Put/seqid=0 2024-11-28T07:23:23,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:23,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T07:23:23,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:23,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:23,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:23,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742393_1569 (size=12001) 2024-11-28T07:23:23,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e6ce7e583ba049188dcce0f501be9c6e 2024-11-28T07:23:23,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/fabda253d29b40b08ac83e747763eab1 is 50, key is test_row_0/C:col10/1732778603255/Put/seqid=0 2024-11-28T07:23:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742394_1570 (size=12001) 2024-11-28T07:23:23,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/fabda253d29b40b08ac83e747763eab1 2024-11-28T07:23:23,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d788da05f63247c8905ef67419983e8c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c 2024-11-28T07:23:23,987 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c, entries=200, sequenceid=115, filesize=14.0 K 2024-11-28T07:23:23,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e6ce7e583ba049188dcce0f501be9c6e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e 2024-11-28T07:23:23,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e, entries=150, sequenceid=115, filesize=11.7 K 2024-11-28T07:23:24,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/fabda253d29b40b08ac83e747763eab1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1 2024-11-28T07:23:24,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1, entries=150, sequenceid=115, filesize=11.7 K 2024-11-28T07:23:24,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for e310f48e6ef0ed637c2d62fa297701bf in 634ms, sequenceid=115, compaction requested=false 2024-11-28T07:23:24,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:24,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:24,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:24,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T07:23:24,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:24,041 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:24,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f4c25c9da0bc4ead8360d115c3dd4e1f is 50, key is test_row_0/A:col10/1732778603380/Put/seqid=0 2024-11-28T07:23:24,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:24,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:24,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742395_1571 (size=12001) 2024-11-28T07:23:24,091 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f4c25c9da0bc4ead8360d115c3dd4e1f 2024-11-28T07:23:24,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/86f29501a2a5406ba5fc3a7b574f065a is 50, key is test_row_0/B:col10/1732778603380/Put/seqid=0 2024-11-28T07:23:24,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742396_1572 (size=12001) 2024-11-28T07:23:24,119 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/86f29501a2a5406ba5fc3a7b574f065a 2024-11-28T07:23:24,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/82349ec0d854476a8508a347fd7cf9da is 50, key is test_row_0/C:col10/1732778603380/Put/seqid=0 2024-11-28T07:23:24,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742397_1573 (size=12001) 2024-11-28T07:23:24,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778664207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778664209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778664320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778664321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:24,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778664529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778664540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,584 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/82349ec0d854476a8508a347fd7cf9da 2024-11-28T07:23:24,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f4c25c9da0bc4ead8360d115c3dd4e1f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f 2024-11-28T07:23:24,597 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f, entries=150, sequenceid=130, filesize=11.7 K 2024-11-28T07:23:24,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/86f29501a2a5406ba5fc3a7b574f065a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a 2024-11-28T07:23:24,604 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a, entries=150, sequenceid=130, filesize=11.7 K 2024-11-28T07:23:24,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/82349ec0d854476a8508a347fd7cf9da as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da 2024-11-28T07:23:24,610 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da, entries=150, sequenceid=130, filesize=11.7 K 2024-11-28T07:23:24,613 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e310f48e6ef0ed637c2d62fa297701bf in 572ms, sequenceid=130, compaction requested=true 2024-11-28T07:23:24,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:24,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:24,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-28T07:23:24,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-28T07:23:24,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-28T07:23:24,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1960 sec 2024-11-28T07:23:24,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.2050 sec 2024-11-28T07:23:24,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:24,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1857c6be4aa8451c9aa9c36e871711c0 is 50, key is test_row_0/A:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:24,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778664890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:24,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778664894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:24,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742398_1574 (size=14541) 2024-11-28T07:23:24,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1857c6be4aa8451c9aa9c36e871711c0 2024-11-28T07:23:24,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/39c44b60b0d44cd5b850e61ad6d88bd7 is 50, key is test_row_0/B:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742399_1575 (size=12151) 2024-11-28T07:23:25,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778665001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778665004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778665211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778665213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,374 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/39c44b60b0d44cd5b850e61ad6d88bd7 2024-11-28T07:23:25,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/aa418ae16cac47dbb95e4f37a1c0beb2 is 50, key is test_row_0/C:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:25,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742400_1576 (size=12151) 2024-11-28T07:23:25,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/aa418ae16cac47dbb95e4f37a1c0beb2 2024-11-28T07:23:25,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1857c6be4aa8451c9aa9c36e871711c0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0 2024-11-28T07:23:25,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0, entries=200, sequenceid=155, filesize=14.2 K 2024-11-28T07:23:25,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/39c44b60b0d44cd5b850e61ad6d88bd7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7 2024-11-28T07:23:25,439 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7, entries=150, sequenceid=155, filesize=11.9 K 2024-11-28T07:23:25,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/aa418ae16cac47dbb95e4f37a1c0beb2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2 2024-11-28T07:23:25,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2, entries=150, sequenceid=155, filesize=11.9 K 2024-11-28T07:23:25,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for e310f48e6ef0ed637c2d62fa297701bf in 600ms, sequenceid=155, compaction requested=true 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:25,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T07:23:25,446 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:25,446 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:25,448 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:25,448 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): 
e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:25,448 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:25,448 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b4639a717b87410aae36970418f311ce, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=51.8 K 2024-11-28T07:23:25,449 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:25,449 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:25,449 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:25,449 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/eed3ec97cea74d37bd462264f8b5e3d1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=47.2 K 2024-11-28T07:23:25,449 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4639a717b87410aae36970418f311ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602015 2024-11-28T07:23:25,449 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting eed3ec97cea74d37bd462264f8b5e3d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602015 2024-11-28T07:23:25,450 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d788da05f63247c8905ef67419983e8c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732778603217 2024-11-28T07:23:25,450 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fabda253d29b40b08ac83e747763eab1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732778603243 2024-11-28T07:23:25,450 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4c25c9da0bc4ead8360d115c3dd4e1f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778603380 2024-11-28T07:23:25,450 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 82349ec0d854476a8508a347fd7cf9da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778603380 2024-11-28T07:23:25,450 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1857c6be4aa8451c9aa9c36e871711c0, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:25,451 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting aa418ae16cac47dbb95e4f37a1c0beb2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:25,469 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#484 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:25,470 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d852f14bd80e4a1ab82abc801a18daaf is 50, key is test_row_0/C:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:25,473 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#485 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:25,473 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f9884ebf6ba54cb793bbdc7f56b5acde is 50, key is test_row_0/A:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T07:23:25,524 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-28T07:23:25,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742401_1577 (size=12493) 2024-11-28T07:23:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:25,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:25,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:25,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-28T07:23:25,531 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:25,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:25,532 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:25,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:25,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:25,532 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:25,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:25,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:25,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:25,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:25,537 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d852f14bd80e4a1ab82abc801a18daaf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d852f14bd80e4a1ab82abc801a18daaf 2024-11-28T07:23:25,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1814c6265802486ca3ed6aa9474fca51 is 50, key is test_row_0/A:col10/1732778605526/Put/seqid=0 2024-11-28T07:23:25,543 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into d852f14bd80e4a1ab82abc801a18daaf(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:25,543 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:25,543 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=12, startTime=1732778605446; duration=0sec 2024-11-28T07:23:25,543 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:25,543 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:25,543 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:25,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742402_1578 (size=12493) 2024-11-28T07:23:25,545 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:25,545 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:25,545 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:25,545 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a46801e1820b45daaee63a8d826cda52, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=47.2 K 2024-11-28T07:23:25,546 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a46801e1820b45daaee63a8d826cda52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732778602015 2024-11-28T07:23:25,547 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e6ce7e583ba049188dcce0f501be9c6e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732778603243 2024-11-28T07:23:25,547 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 86f29501a2a5406ba5fc3a7b574f065a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732778603380 2024-11-28T07:23:25,548 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 39c44b60b0d44cd5b850e61ad6d88bd7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:25,558 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f9884ebf6ba54cb793bbdc7f56b5acde as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f9884ebf6ba54cb793bbdc7f56b5acde 2024-11-28T07:23:25,563 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into f9884ebf6ba54cb793bbdc7f56b5acde(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:25,563 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:25,563 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=12, startTime=1732778605446; duration=0sec 2024-11-28T07:23:25,563 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:25,563 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:25,568 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#487 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:25,568 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/312a95afa4c24de0b9f60ea7a93e5955 is 50, key is test_row_0/B:col10/1732778604172/Put/seqid=0 2024-11-28T07:23:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742403_1579 (size=16931) 2024-11-28T07:23:25,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1814c6265802486ca3ed6aa9474fca51 2024-11-28T07:23:25,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/c50e787c2af9438b990440f8ed1ad9ee is 50, key is test_row_0/B:col10/1732778605526/Put/seqid=0 2024-11-28T07:23:25,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742404_1580 (size=12493) 2024-11-28T07:23:25,602 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/312a95afa4c24de0b9f60ea7a93e5955 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/312a95afa4c24de0b9f60ea7a93e5955 2024-11-28T07:23:25,611 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 312a95afa4c24de0b9f60ea7a93e5955(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:25,611 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:25,611 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=12, startTime=1732778605446; duration=0sec 2024-11-28T07:23:25,611 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:25,611 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:25,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742405_1581 (size=12151) 2024-11-28T07:23:25,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:25,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778665650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778665651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:25,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T07:23:25,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:25,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:25,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:25,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778665760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778665759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:25,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:25,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T07:23:25,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:25,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:25,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:25,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778665970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:25,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778665971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:25,994 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:25,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T07:23:25,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:25,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:25,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:25,995 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:25,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:26,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/c50e787c2af9438b990440f8ed1ad9ee 2024-11-28T07:23:26,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cdeb664d776d41a3bf624792bec53e78 is 50, key is test_row_0/C:col10/1732778605526/Put/seqid=0 2024-11-28T07:23:26,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742406_1582 (size=12151) 2024-11-28T07:23:26,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cdeb664d776d41a3bf624792bec53e78 2024-11-28T07:23:26,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/1814c6265802486ca3ed6aa9474fca51 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51 2024-11-28T07:23:26,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51, entries=250, sequenceid=168, filesize=16.5 K 2024-11-28T07:23:26,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/c50e787c2af9438b990440f8ed1ad9ee as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee 2024-11-28T07:23:26,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee, entries=150, sequenceid=168, filesize=11.9 K 2024-11-28T07:23:26,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cdeb664d776d41a3bf624792bec53e78 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78 2024-11-28T07:23:26,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78, entries=150, sequenceid=168, filesize=11.9 K 2024-11-28T07:23:26,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e310f48e6ef0ed637c2d62fa297701bf in 590ms, sequenceid=168, compaction requested=false 2024-11-28T07:23:26,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:26,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:26,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:26,152 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:26,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/194fe0b611b14f179bb3087fef53f036 is 50, key is test_row_0/A:col10/1732778605648/Put/seqid=0 2024-11-28T07:23:26,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to 
blk_1073742407_1583 (size=12151) 2024-11-28T07:23:26,221 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/194fe0b611b14f179bb3087fef53f036 2024-11-28T07:23:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/f7efc4f5bd204e3b82813442ba3657ae is 50, key is test_row_0/B:col10/1732778605648/Put/seqid=0 2024-11-28T07:23:26,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742408_1584 (size=12151) 2024-11-28T07:23:26,286 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/f7efc4f5bd204e3b82813442ba3657ae 2024-11-28T07:23:26,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:26,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1ef21a1c434c4420a2bffaa6869d2d37 is 50, key is test_row_0/C:col10/1732778605648/Put/seqid=0 2024-11-28T07:23:26,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778666317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778666323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742409_1585 (size=12151) 2024-11-28T07:23:26,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778666424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778666429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:26,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778666634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:26,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778666638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:26,735 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1ef21a1c434c4420a2bffaa6869d2d37 2024-11-28T07:23:26,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/194fe0b611b14f179bb3087fef53f036 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036 2024-11-28T07:23:26,752 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036, entries=150, sequenceid=194, filesize=11.9 K 2024-11-28T07:23:26,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/f7efc4f5bd204e3b82813442ba3657ae as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae 2024-11-28T07:23:26,758 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae, entries=150, sequenceid=194, filesize=11.9 K 2024-11-28T07:23:26,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1ef21a1c434c4420a2bffaa6869d2d37 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37 2024-11-28T07:23:26,766 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37, entries=150, sequenceid=194, filesize=11.9 K 2024-11-28T07:23:26,767 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e310f48e6ef0ed637c2d62fa297701bf in 615ms, sequenceid=194, compaction requested=true 2024-11-28T07:23:26,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:26,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:26,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-28T07:23:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-28T07:23:26,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-28T07:23:26,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2370 sec 2024-11-28T07:23:26,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.2420 sec 2024-11-28T07:23:26,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:23:26,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:26,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:26,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:26,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:26,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:26,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c354bc0f8af4c7cbfec1578e07ae599 is 50, key is test_row_0/A:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:26,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742410_1586 (size=14541) 2024-11-28T07:23:26,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c354bc0f8af4c7cbfec1578e07ae599 2024-11-28T07:23:27,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/44bab056680d4026bdb5ef47d1d8645c is 50, key is test_row_0/B:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:27,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to 
blk_1073742411_1587 (size=12151) 2024-11-28T07:23:27,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/44bab056680d4026bdb5ef47d1d8645c 2024-11-28T07:23:27,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5b10ea1bc6324c5892d8fbba46260086 is 50, key is test_row_0/C:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:27,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742412_1588 (size=12151) 2024-11-28T07:23:27,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5b10ea1bc6324c5892d8fbba46260086 2024-11-28T07:23:27,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c354bc0f8af4c7cbfec1578e07ae599 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599 2024-11-28T07:23:27,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599, entries=200, sequenceid=209, filesize=14.2 K 2024-11-28T07:23:27,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/44bab056680d4026bdb5ef47d1d8645c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c 2024-11-28T07:23:27,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T07:23:27,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5b10ea1bc6324c5892d8fbba46260086 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086 2024-11-28T07:23:27,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T07:23:27,153 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e310f48e6ef0ed637c2d62fa297701bf in 193ms, sequenceid=209, compaction requested=true 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:27,153 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:27,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:27,153 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:27,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:27,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:27,155 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:27,155 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:27,155 INFO 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:27,155 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:27,155 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f9884ebf6ba54cb793bbdc7f56b5acde, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=54.8 K 2024-11-28T07:23:27,155 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/312a95afa4c24de0b9f60ea7a93e5955, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=47.8 K 2024-11-28T07:23:27,156 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f9884ebf6ba54cb793bbdc7f56b5acde, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:27,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 312a95afa4c24de0b9f60ea7a93e5955, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:27,158 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1814c6265802486ca3ed6aa9474fca51, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732778604876 2024-11-28T07:23:27,158 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c50e787c2af9438b990440f8ed1ad9ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, 
seqNum=168, earliestPutTs=1732778604876 2024-11-28T07:23:27,159 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 194fe0b611b14f179bb3087fef53f036, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732778605617 2024-11-28T07:23:27,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7efc4f5bd204e3b82813442ba3657ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732778605617 2024-11-28T07:23:27,159 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c354bc0f8af4c7cbfec1578e07ae599, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:27,159 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44bab056680d4026bdb5ef47d1d8645c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:27,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:27,183 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:27,184 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9a9beb6b8d9d4351b27446f60c5b21e3 is 50, key is test_row_0/B:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:27,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f87a834b6fba4f99b4fccbaef7175012 is 50, key is test_row_0/A:col10/1732778607044/Put/seqid=0 2024-11-28T07:23:27,215 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#498 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:27,216 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/15e861db42fb49c1a8989e2914de9065 is 50, key is test_row_0/A:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:27,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742414_1590 (size=14541) 2024-11-28T07:23:27,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f87a834b6fba4f99b4fccbaef7175012 2024-11-28T07:23:27,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742413_1589 (size=12629) 2024-11-28T07:23:27,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742415_1591 (size=12629) 2024-11-28T07:23:27,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/d2949e8e6e074457bad3407f95d18f77 is 50, key is test_row_0/B:col10/1732778607044/Put/seqid=0 2024-11-28T07:23:27,288 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9a9beb6b8d9d4351b27446f60c5b21e3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9a9beb6b8d9d4351b27446f60c5b21e3 2024-11-28T07:23:27,295 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/15e861db42fb49c1a8989e2914de9065 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15e861db42fb49c1a8989e2914de9065 2024-11-28T07:23:27,297 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 9a9beb6b8d9d4351b27446f60c5b21e3(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:27,297 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:27,297 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=12, startTime=1732778607153; duration=0sec 2024-11-28T07:23:27,297 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:27,297 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:27,297 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:27,299 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:27,299 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:27,299 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:27,299 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d852f14bd80e4a1ab82abc801a18daaf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=47.8 K 2024-11-28T07:23:27,299 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d852f14bd80e4a1ab82abc801a18daaf, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732778604172 2024-11-28T07:23:27,304 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cdeb664d776d41a3bf624792bec53e78, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1732778604876 2024-11-28T07:23:27,305 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ef21a1c434c4420a2bffaa6869d2d37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732778605617 2024-11-28T07:23:27,305 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b10ea1bc6324c5892d8fbba46260086, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:27,308 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 15e861db42fb49c1a8989e2914de9065(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:27,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:27,308 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=12, startTime=1732778607153; duration=0sec 2024-11-28T07:23:27,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:27,308 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:27,323 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#500 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:27,323 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/85d6f97be55a4d658f15ba6f1860dd70 is 50, key is test_row_0/C:col10/1732778606321/Put/seqid=0 2024-11-28T07:23:27,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742416_1592 (size=12151) 2024-11-28T07:23:27,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/d2949e8e6e074457bad3407f95d18f77 2024-11-28T07:23:27,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/18e9c20b1f10419f8a4c332b3aad5774 is 50, key is test_row_0/C:col10/1732778607044/Put/seqid=0 2024-11-28T07:23:27,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742417_1593 (size=12629) 2024-11-28T07:23:27,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742418_1594 (size=12151) 2024-11-28T07:23:27,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/18e9c20b1f10419f8a4c332b3aad5774 2024-11-28T07:23:27,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f87a834b6fba4f99b4fccbaef7175012 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012 2024-11-28T07:23:27,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012, entries=200, sequenceid=231, filesize=14.2 K 2024-11-28T07:23:27,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/d2949e8e6e074457bad3407f95d18f77 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77 2024-11-28T07:23:27,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77, entries=150, sequenceid=231, filesize=11.9 K 2024-11-28T07:23:27,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/18e9c20b1f10419f8a4c332b3aad5774 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774 2024-11-28T07:23:27,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774, entries=150, sequenceid=231, filesize=11.9 K 2024-11-28T07:23:27,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e310f48e6ef0ed637c2d62fa297701bf in 290ms, sequenceid=231, compaction requested=false 2024-11-28T07:23:27,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:27,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:27,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:27,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:27,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:27,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:27,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8ee555b059ef4483b703641259493d5c is 50, key is test_row_0/A:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:27,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742419_1595 (size=12151) 2024-11-28T07:23:27,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8ee555b059ef4483b703641259493d5c 2024-11-28T07:23:27,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e958e64433bd4b2c8593ea0c4aed84c4 is 50, key is test_row_0/B:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:27,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778667571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778667576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778667584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742420_1596 (size=12151) 2024-11-28T07:23:27,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T07:23:27,638 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-28T07:23:27,639 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:27,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-28T07:23:27,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T07:23:27,641 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:27,644 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:27,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:27,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778667686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778667687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778667691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T07:23:27,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/85d6f97be55a4d658f15ba6f1860dd70 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/85d6f97be55a4d658f15ba6f1860dd70 2024-11-28T07:23:27,793 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 85d6f97be55a4d658f15ba6f1860dd70(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:27,793 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:27,793 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=12, startTime=1732778607153; duration=0sec 2024-11-28T07:23:27,793 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:27,793 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:27,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:27,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T07:23:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:27,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:27,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:27,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:27,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:27,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778667895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778667897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778667897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778667897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778667897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:27,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T07:23:27,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:27,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T07:23:27,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:27,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:27,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:27,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:27,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:27,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:28,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e958e64433bd4b2c8593ea0c4aed84c4 2024-11-28T07:23:28,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cb015ec5fee042e38083b441cd4dbe7a is 50, key is test_row_0/C:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:28,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742421_1597 (size=12151) 2024-11-28T07:23:28,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cb015ec5fee042e38083b441cd4dbe7a 2024-11-28T07:23:28,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8ee555b059ef4483b703641259493d5c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c 2024-11-28T07:23:28,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c, entries=150, sequenceid=248, filesize=11.9 K 2024-11-28T07:23:28,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e958e64433bd4b2c8593ea0c4aed84c4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4 2024-11-28T07:23:28,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4, entries=150, sequenceid=248, filesize=11.9 K 2024-11-28T07:23:28,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/cb015ec5fee042e38083b441cd4dbe7a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a 2024-11-28T07:23:28,101 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a, entries=150, sequenceid=248, filesize=11.9 K 2024-11-28T07:23:28,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e310f48e6ef0ed637c2d62fa297701bf in 599ms, sequenceid=248, compaction requested=true 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:28,102 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:28,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:28,102 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:28,103 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:28,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:28,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:28,104 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:28,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:28,105 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:28,105 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:28,106 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:28,106 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:28,106 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): 
Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:28,106 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:28,106 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9a9beb6b8d9d4351b27446f60c5b21e3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=36.1 K 2024-11-28T07:23:28,106 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15e861db42fb49c1a8989e2914de9065, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=38.4 K 2024-11-28T07:23:28,106 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a9beb6b8d9d4351b27446f60c5b21e3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:28,106 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15e861db42fb49c1a8989e2914de9065, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:28,107 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d2949e8e6e074457bad3407f95d18f77, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732778607044 2024-11-28T07:23:28,107 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e958e64433bd4b2c8593ea0c4aed84c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226 2024-11-28T07:23:28,108 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f87a834b6fba4f99b4fccbaef7175012, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732778607044 2024-11-28T07:23:28,108 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
8ee555b059ef4483b703641259493d5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226 2024-11-28T07:23:28,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/7e983934b86643f29559319abf8d0ad1 is 50, key is test_row_0/A:col10/1732778607583/Put/seqid=0 2024-11-28T07:23:28,127 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:28,128 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/c449eac2a306449c8a9fde567e4e9256 is 50, key is test_row_0/A:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:28,146 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#507 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:28,147 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/47df76a3f2c844debe8e9c9ba55ea2a4 is 50, key is test_row_0/B:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742422_1598 (size=12301) 2024-11-28T07:23:28,193 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/7e983934b86643f29559319abf8d0ad1 2024-11-28T07:23:28,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742423_1599 (size=12731) 2024-11-28T07:23:28,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:28,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742424_1600 (size=12731) 2024-11-28T07:23:28,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/5e6537cbaf184d6980956b602841d41c is 50, key is test_row_0/B:col10/1732778607583/Put/seqid=0 2024-11-28T07:23:28,243 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/c449eac2a306449c8a9fde567e4e9256 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/c449eac2a306449c8a9fde567e4e9256 2024-11-28T07:23:28,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,244 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/47df76a3f2c844debe8e9c9ba55ea2a4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/47df76a3f2c844debe8e9c9ba55ea2a4 2024-11-28T07:23:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778668232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T07:23:28,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778668241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,256 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 47df76a3f2c844debe8e9c9ba55ea2a4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:28,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:28,256 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778608102; duration=0sec 2024-11-28T07:23:28,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:28,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:28,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:28,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778668243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778668243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778668243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,259 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:28,259 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:28,259 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:28,259 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/85d6f97be55a4d658f15ba6f1860dd70, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=36.1 K 2024-11-28T07:23:28,259 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 85d6f97be55a4d658f15ba6f1860dd70, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732778606314 2024-11-28T07:23:28,259 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 18e9c20b1f10419f8a4c332b3aad5774, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1732778607044 2024-11-28T07:23:28,260 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting cb015ec5fee042e38083b441cd4dbe7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226 2024-11-28T07:23:28,261 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into c449eac2a306449c8a9fde567e4e9256(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:28,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:28,262 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778608102; duration=0sec 2024-11-28T07:23:28,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:28,262 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:28,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742425_1601 (size=12301) 2024-11-28T07:23:28,274 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/5e6537cbaf184d6980956b602841d41c 2024-11-28T07:23:28,288 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#509 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:28,291 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/8d7f0bcd5b8e43ba8799e2cb1112114e is 50, key is test_row_0/C:col10/1732778607232/Put/seqid=0 2024-11-28T07:23:28,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b943d4620e4b4a42ba42c98e1c3efb4b is 50, key is test_row_0/C:col10/1732778607583/Put/seqid=0 2024-11-28T07:23:28,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778668345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742426_1602 (size=12731) 2024-11-28T07:23:28,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778668356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:28,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778668357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778668368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778668369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742427_1603 (size=12301)
2024-11-28T07:23:28,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778668556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778668563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778668568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778668571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778668573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-28T07:23:28,755 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/8d7f0bcd5b8e43ba8799e2cb1112114e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8d7f0bcd5b8e43ba8799e2cb1112114e
2024-11-28T07:23:28,759 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 8d7f0bcd5b8e43ba8799e2cb1112114e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:23:28,760 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf:
2024-11-28T07:23:28,760 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778608102; duration=0sec
2024-11-28T07:23:28,760 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:23:28,760 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C
2024-11-28T07:23:28,781 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b943d4620e4b4a42ba42c98e1c3efb4b
2024-11-28T07:23:28,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/7e983934b86643f29559319abf8d0ad1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1
2024-11-28T07:23:28,791 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1, entries=150, sequenceid=270, filesize=12.0 K
2024-11-28T07:23:28,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/5e6537cbaf184d6980956b602841d41c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c
2024-11-28T07:23:28,799 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c, entries=150, sequenceid=270, filesize=12.0 K
2024-11-28T07:23:28,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b943d4620e4b4a42ba42c98e1c3efb4b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b
2024-11-28T07:23:28,810 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b, entries=150, sequenceid=270, filesize=12.0 K
2024-11-28T07:23:28,810 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=93.93 KB/96180 for e310f48e6ef0ed637c2d62fa297701bf in 706ms, sequenceid=270, compaction requested=false
2024-11-28T07:23:28,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf:
2024-11-28T07:23:28,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.
2024-11-28T07:23:28,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145
2024-11-28T07:23:28,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=145
2024-11-28T07:23:28,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144
2024-11-28T07:23:28,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1680 sec
2024-11-28T07:23:28,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.1750 sec
2024-11-28T07:23:28,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf
2024-11-28T07:23:28,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C
2024-11-28T07:23:28,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:28,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/15ee75e9aff241c7b5abf5b30f09276f is 50, key is test_row_0/A:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:28,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742428_1604 (size=14741)
2024-11-28T07:23:28,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/15ee75e9aff241c7b5abf5b30f09276f
2024-11-28T07:23:28,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778668905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778668906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9dbbba67f441417e936d2ebc75602cc3 is 50, key is test_row_0/B:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:28,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778668909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778668915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778668917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:28,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742429_1605 (size=12301)
2024-11-28T07:23:28,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9dbbba67f441417e936d2ebc75602cc3
2024-11-28T07:23:28,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/571849bac85d4791ae9d57ce2021dd41 is 50, key is test_row_0/C:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:29,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778669020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778669020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778669025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778669026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778669026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742430_1606 (size=12301)
2024-11-28T07:23:29,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/571849bac85d4791ae9d57ce2021dd41
2024-11-28T07:23:29,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/15ee75e9aff241c7b5abf5b30f09276f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f
2024-11-28T07:23:29,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f, entries=200, sequenceid=291, filesize=14.4 K
2024-11-28T07:23:29,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9dbbba67f441417e936d2ebc75602cc3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3
2024-11-28T07:23:29,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3, entries=150, sequenceid=291, filesize=12.0 K
2024-11-28T07:23:29,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/571849bac85d4791ae9d57ce2021dd41 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41
2024-11-28T07:23:29,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41, entries=150, sequenceid=291, filesize=12.0 K
2024-11-28T07:23:29,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for e310f48e6ef0ed637c2d62fa297701bf in 202ms, sequenceid=291, compaction requested=true
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf:
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2
2024-11-28T07:23:29,069 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3
2024-11-28T07:23:29,069 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:23:29,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39773 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files)
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files)
2024-11-28T07:23:29,073 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.
2024-11-28T07:23:29,073 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.
2024-11-28T07:23:29,073 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/c449eac2a306449c8a9fde567e4e9256, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=38.8 K
2024-11-28T07:23:29,073 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/47df76a3f2c844debe8e9c9ba55ea2a4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=36.5 K
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c449eac2a306449c8a9fde567e4e9256, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226
2024-11-28T07:23:29,073 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 47df76a3f2c844debe8e9c9ba55ea2a4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226
2024-11-28T07:23:29,074 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e983934b86643f29559319abf8d0ad1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732778607571
2024-11-28T07:23:29,074 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e6537cbaf184d6980956b602841d41c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732778607571
2024-11-28T07:23:29,074 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dbbba67f441417e936d2ebc75602cc3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241
2024-11-28T07:23:29,074 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15ee75e9aff241c7b5abf5b30f09276f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241
2024-11-28T07:23:29,090 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#514 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:23:29,090 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/a6953fb775104e2b88905d714ccc2327 is 50, key is test_row_0/A:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:29,103 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:23:29,103 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/8888da23e54e4ad39110166ed865190b is 50, key is test_row_0/B:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:29,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742431_1607 (size=12983)
2024-11-28T07:23:29,151 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/a6953fb775104e2b88905d714ccc2327 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a6953fb775104e2b88905d714ccc2327
2024-11-28T07:23:29,155 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into a6953fb775104e2b88905d714ccc2327(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:23:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf:
2024-11-28T07:23:29,155 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778609069; duration=0sec
2024-11-28T07:23:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T07:23:29,155 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A
2024-11-28T07:23:29,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T07:23:29,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T07:23:29,156 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files)
2024-11-28T07:23:29,156 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.
2024-11-28T07:23:29,157 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8d7f0bcd5b8e43ba8799e2cb1112114e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=36.5 K
2024-11-28T07:23:29,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742432_1608 (size=12983)
2024-11-28T07:23:29,161 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d7f0bcd5b8e43ba8799e2cb1112114e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1732778607226
2024-11-28T07:23:29,162 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b943d4620e4b4a42ba42c98e1c3efb4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1732778607571
2024-11-28T07:23:29,162 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 571849bac85d4791ae9d57ce2021dd41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241
2024-11-28T07:23:29,170 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#516 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T07:23:29,171 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/dc78d1211ed340d588a9462408220cb6 is 50, key is test_row_0/C:col10/1732778608241/Put/seqid=0
2024-11-28T07:23:29,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742433_1609 (size=12983)
2024-11-28T07:23:29,184 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/8888da23e54e4ad39110166ed865190b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/8888da23e54e4ad39110166ed865190b
2024-11-28T07:23:29,189 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 8888da23e54e4ad39110166ed865190b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:23:29,189 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf:
2024-11-28T07:23:29,189 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778609069; duration=0sec
2024-11-28T07:23:29,189 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:23:29,189 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B
2024-11-28T07:23:29,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=107.34 KB heapSize=282 KB
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C
2024-11-28T07:23:29,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:29,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf
2024-11-28T07:23:29,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/143f32a1717d40e78eb8de147e1a1bf4 is 50, key is test_row_0/A:col10/1732778608915/Put/seqid=0
2024-11-28T07:23:29,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742434_1610 (size=14741)
2024-11-28T07:23:29,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/143f32a1717d40e78eb8de147e1a1bf4
2024-11-28T07:23:29,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778669265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778669267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:29,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778669268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:29,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778669271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778669279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e028d4d7b8e241d6bbe96cf6d32dc131 is 50, key is test_row_0/B:col10/1732778608915/Put/seqid=0 2024-11-28T07:23:29,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742435_1611 (size=12301) 2024-11-28T07:23:29,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778669380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778669380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778669381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778669386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778669390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,590 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/dc78d1211ed340d588a9462408220cb6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/dc78d1211ed340d588a9462408220cb6 2024-11-28T07:23:29,596 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into dc78d1211ed340d588a9462408220cb6(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
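The repeated RegionTooBusyException entries above are thrown from HRegion.checkResources while the region's memstore is over its blocking limit (512.0 K here); they clear once the in-flight flushes drain the memstore. The stock HBase client normally retries these internally (and may surface them wrapped in a retries-exhausted exception), but a caller issuing raw puts can back off explicitly. A minimal client-side sketch, assuming the table, row, family, and qualifier names that appear in this log; the cell value and backoff numbers are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);          // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give flushes/compactions time to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}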
2024-11-28T07:23:29,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:29,596 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778609069; duration=0sec 2024-11-28T07:23:29,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:29,596 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:29,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778669593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778669593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778669594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778669598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778669602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:29,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T07:23:29,746 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-28T07:23:29,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e028d4d7b8e241d6bbe96cf6d32dc131 2024-11-28T07:23:29,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:29,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-28T07:23:29,749 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:29,750 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:29,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:29,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T07:23:29,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1923aa2162644f638f00d08c2f20afb3 is 50, key is test_row_0/C:col10/1732778608915/Put/seqid=0 2024-11-28T07:23:29,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742436_1612 (size=12301) 
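The entries that follow show the master accepting a flush request from the jenkins client and driving it as FlushTableProcedure pid=146 with a per-region FlushRegionProcedure child, while the client waits on the result (HBaseAdmin$TableFuture "Operation: FLUSH ... completed"). A minimal admin-side sketch of issuing that same table flush through the public Admin API, assuming only the table name taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in this log the
            // request becomes FlushTableProcedure/FlushRegionProcedure and the
            // client-side future returns once the procedure reports completion.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}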
2024-11-28T07:23:29,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1923aa2162644f638f00d08c2f20afb3 2024-11-28T07:23:29,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/143f32a1717d40e78eb8de147e1a1bf4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4 2024-11-28T07:23:29,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4, entries=200, sequenceid=312, filesize=14.4 K 2024-11-28T07:23:29,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e028d4d7b8e241d6bbe96cf6d32dc131 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131 2024-11-28T07:23:29,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131, entries=150, sequenceid=312, filesize=12.0 K 2024-11-28T07:23:29,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/1923aa2162644f638f00d08c2f20afb3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3 2024-11-28T07:23:29,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3, entries=150, sequenceid=312, filesize=12.0 K 2024-11-28T07:23:29,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for e310f48e6ef0ed637c2d62fa297701bf in 610ms, sequenceid=312, compaction requested=false 2024-11-28T07:23:29,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T07:23:29,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:29,902 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:29,902 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:29,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:29,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/716582cd7097407490f77ae09dd27833 is 50, key is test_row_0/A:col10/1732778609269/Put/seqid=0 2024-11-28T07:23:29,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:29,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:29,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742437_1613 (size=12301) 2024-11-28T07:23:29,942 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/716582cd7097407490f77ae09dd27833 2024-11-28T07:23:29,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 is 50, key is test_row_0/B:col10/1732778609269/Put/seqid=0 2024-11-28T07:23:29,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742438_1614 (size=12301) 2024-11-28T07:23:29,988 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 2024-11-28T07:23:29,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/4060a04976774d129e8a571a1f2b6b2d is 50, key is test_row_0/C:col10/1732778609269/Put/seqid=0 2024-11-28T07:23:30,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778669993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778669994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778669994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778669995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778669995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742439_1615 (size=12301) 2024-11-28T07:23:30,023 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/4060a04976774d129e8a571a1f2b6b2d 2024-11-28T07:23:30,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/716582cd7097407490f77ae09dd27833 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833 2024-11-28T07:23:30,034 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T07:23:30,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 2024-11-28T07:23:30,040 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T07:23:30,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/4060a04976774d129e8a571a1f2b6b2d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d 2024-11-28T07:23:30,047 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T07:23:30,048 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e310f48e6ef0ed637c2d62fa297701bf in 146ms, sequenceid=330, compaction requested=true 2024-11-28T07:23:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
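For context on the "Over memstore limit=512.0 K" threshold that keeps appearing: HBase blocks updates to a region once its memstore exceeds the configured flush size multiplied by the block multiplier, so this test is evidently running with a very small flush size. A minimal sketch of the two standard settings involved; the concrete values are chosen only to reproduce a 512 K blocking limit and are not read from the test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush size * block multiplier.
        // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("memstore blocking limit (bytes): " + blockingLimit);
    }
}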
2024-11-28T07:23:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-28T07:23:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-28T07:23:30,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-28T07:23:30,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 300 msec 2024-11-28T07:23:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T07:23:30,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 305 msec 2024-11-28T07:23:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:30,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:30,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:30,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:30,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:30,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,124 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cb580e542c014fa5b45af1f69bbb7f62 is 50, key is test_row_0/A:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742440_1616 (size=19621) 2024-11-28T07:23:30,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cb580e542c014fa5b45af1f69bbb7f62 2024-11-28T07:23:30,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778670141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778670148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778670150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778670150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778670159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/41c1371e65d14a778d7a3463c79c1034 is 50, key is test_row_0/B:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742441_1617 (size=12301) 2024-11-28T07:23:30,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778670261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778670266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778670266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778670267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778670272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T07:23:30,354 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-28T07:23:30,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-28T07:23:30,358 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:30,359 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:30,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T07:23:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=148 2024-11-28T07:23:30,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778670471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778670480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778670480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778670481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778670481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:30,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:30,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:30,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
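The WARN/DEBUG pairs above show the region server rejecting Mutate calls with RegionTooBusyException because the region's memstore has crossed its blocking limit (512.0 K here), while the follow-up FlushRegionCallable attempts fail with "already flushing". Below is a minimal sketch, assuming a plain Table handle, of how a writer could back off and retry on that exception; the standard HBase client already retries such calls on its own, and the retry count and sleep values are arbitrary choices for illustration only.

```java
// Illustrative sketch only (not part of the test log): back off when the server
// rejects a mutation because the region's memstore is over its blocking limit.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  static void putWithBackoff(Table table, Put put) throws Exception {
    int attempts = 0;
    while (true) {
      try {
        table.put(put);                    // the HBase client normally retries this itself
        return;
      } catch (RegionTooBusyException e) {
        if (++attempts > 5) throw e;       // give up after a few attempts
        Thread.sleep(200L * attempts);     // simple linear backoff while the flush drains
      }
    }
  }
}
```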
2024-11-28T07:23:30,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/41c1371e65d14a778d7a3463c79c1034 2024-11-28T07:23:30,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/ff146b548c274b60985a43a47215e6f7 is 50, key is test_row_0/C:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742442_1618 (size=12301) 2024-11-28T07:23:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T07:23:30,662 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/ff146b548c274b60985a43a47215e6f7 2024-11-28T07:23:30,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:30,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:30,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:30,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cb580e542c014fa5b45af1f69bbb7f62 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62 2024-11-28T07:23:30,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62, entries=300, sequenceid=352, filesize=19.2 K 2024-11-28T07:23:30,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/41c1371e65d14a778d7a3463c79c1034 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034 2024-11-28T07:23:30,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034, entries=150, sequenceid=352, filesize=12.0 K 2024-11-28T07:23:30,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/ff146b548c274b60985a43a47215e6f7 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7 2024-11-28T07:23:30,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7, entries=150, sequenceid=352, filesize=12.0 K 2024-11-28T07:23:30,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e310f48e6ef0ed637c2d62fa297701bf in 582ms, sequenceid=352, compaction requested=true 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:30,698 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:30,698 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:30,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:30,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59646 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:30,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:30,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
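At this point the flush has finished and the region server queues minor compactions for stores A, B and C, with the exploring compaction policy selecting four files per store. The following is a hypothetical sketch, not taken from the test, of requesting a compaction of one column family through the Admin API and polling until the region server reports no compaction in progress; the poll interval is arbitrary.

```java
// Illustrative sketch only: ask for a (minor) compaction of a single column family
// and wait for the compaction state to return to NONE.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactFamily {
  static void compactAndWait(Admin admin, TableName table, String family) throws Exception {
    admin.compact(table, Bytes.toBytes(family));        // minor compaction request
    while (admin.getCompactionState(table) != CompactionState.NONE) {
      Thread.sleep(500);                                // poll until nothing is compacting
    }
  }
}
```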
2024-11-28T07:23:30,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a6953fb775104e2b88905d714ccc2327, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=58.2 K 2024-11-28T07:23:30,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:30,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:30,701 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,701 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/8888da23e54e4ad39110166ed865190b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=48.7 K 2024-11-28T07:23:30,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6953fb775104e2b88905d714ccc2327, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241 2024-11-28T07:23:30,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8888da23e54e4ad39110166ed865190b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241 2024-11-28T07:23:30,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e028d4d7b8e241d6bbe96cf6d32dc131, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, 
earliestPutTs=1732778608888 2024-11-28T07:23:30,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 143f32a1717d40e78eb8de147e1a1bf4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732778608888 2024-11-28T07:23:30,702 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 716582cd7097407490f77ae09dd27833, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778609267 2024-11-28T07:23:30,702 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c1b6f2fd4cf4dadbddc61d529ae21b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778609267 2024-11-28T07:23:30,703 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb580e542c014fa5b45af1f69bbb7f62, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609992 2024-11-28T07:23:30,703 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 41c1371e65d14a778d7a3463c79c1034, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609993 2024-11-28T07:23:30,714 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#526 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:30,715 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/381588250f46472ebec7002739a471f1 is 50, key is test_row_0/B:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,722 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#527 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:30,723 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/69a3c404da9641349462dbbe029d322e is 50, key is test_row_0/A:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742443_1619 (size=13119) 2024-11-28T07:23:30,788 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/381588250f46472ebec7002739a471f1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/381588250f46472ebec7002739a471f1 2024-11-28T07:23:30,793 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 381588250f46472ebec7002739a471f1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:30,793 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:30,793 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=12, startTime=1732778610698; duration=0sec 2024-11-28T07:23:30,793 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:30,793 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:30,793 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:30,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:30,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:30,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:30,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:30,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,797 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:30,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:30,799 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:30,800 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:30,800 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,800 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/dc78d1211ed340d588a9462408220cb6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=48.7 K 2024-11-28T07:23:30,800 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting dc78d1211ed340d588a9462408220cb6, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732778608241 2024-11-28T07:23:30,801 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1923aa2162644f638f00d08c2f20afb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732778608888 2024-11-28T07:23:30,802 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4060a04976774d129e8a571a1f2b6b2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732778609267 2024-11-28T07:23:30,802 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ff146b548c274b60985a43a47215e6f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609993 2024-11-28T07:23:30,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/bb8e56fca4f74db49c27d67a6a1e4453 is 50, key is test_row_0/A:col10/1732778610794/Put/seqid=0 2024-11-28T07:23:30,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to 
blk_1073742444_1620 (size=13119) 2024-11-28T07:23:30,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:30,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:30,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,822 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/69a3c404da9641349462dbbe029d322e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/69a3c404da9641349462dbbe029d322e 2024-11-28T07:23:30,828 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 69a3c404da9641349462dbbe029d322e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:30,828 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:30,828 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=12, startTime=1732778610698; duration=0sec 2024-11-28T07:23:30,828 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:30,828 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:30,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742445_1621 (size=14741) 2024-11-28T07:23:30,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/bb8e56fca4f74db49c27d67a6a1e4453 2024-11-28T07:23:30,846 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:30,847 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d7316d4d41284e3bae1d09639f4c8c32 is 50, key is test_row_0/C:col10/1732778610115/Put/seqid=0 2024-11-28T07:23:30,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/722e70918fd34ce3b837c1bb9f039821 is 50, key is test_row_0/B:col10/1732778610794/Put/seqid=0 2024-11-28T07:23:30,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778670847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778670846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778670870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778670875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778670875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742447_1623 (size=12301) 2024-11-28T07:23:30,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742446_1622 (size=13119) 2024-11-28T07:23:30,919 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d7316d4d41284e3bae1d09639f4c8c32 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d7316d4d41284e3bae1d09639f4c8c32 2024-11-28T07:23:30,925 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into d7316d4d41284e3bae1d09639f4c8c32(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:30,925 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:30,925 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=12, startTime=1732778610698; duration=0sec 2024-11-28T07:23:30,925 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:30,925 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:30,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T07:23:30,972 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:30,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:30,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:30,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:30,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:30,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:30,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778670976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778670976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778670984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778670985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:30,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:30,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778670986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:31,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:31,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:31,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:31,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:31,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:31,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:31,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:31,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778671181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778671182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778671190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778671192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778671193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,279 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:31,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:31,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:31,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:31,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:31,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:31,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:31,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:31,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/722e70918fd34ce3b837c1bb9f039821 2024-11-28T07:23:31,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e8d19ba24ea44d4f90aae8febebbea78 is 50, key is test_row_0/C:col10/1732778610794/Put/seqid=0 2024-11-28T07:23:31,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742448_1624 (size=12301) 2024-11-28T07:23:31,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e8d19ba24ea44d4f90aae8febebbea78 2024-11-28T07:23:31,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/bb8e56fca4f74db49c27d67a6a1e4453 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453 2024-11-28T07:23:31,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453, entries=200, sequenceid=370, filesize=14.4 K 2024-11-28T07:23:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/722e70918fd34ce3b837c1bb9f039821 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821 2024-11-28T07:23:31,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821, entries=150, sequenceid=370, filesize=12.0 K 2024-11-28T07:23:31,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/e8d19ba24ea44d4f90aae8febebbea78 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78 2024-11-28T07:23:31,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78, entries=150, sequenceid=370, filesize=12.0 K 2024-11-28T07:23:31,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e310f48e6ef0ed637c2d62fa297701bf in 577ms, sequenceid=370, compaction requested=false 2024-11-28T07:23:31,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:31,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:31,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:31,433 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:31,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/5b09acf357314624891c1b2cbadeb598 is 50, key is test_row_0/A:col10/1732778610859/Put/seqid=0 2024-11-28T07:23:31,463 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T07:23:31,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742449_1625 (size=12301) 2024-11-28T07:23:31,487 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/5b09acf357314624891c1b2cbadeb598 2024-11-28T07:23:31,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:31,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:31,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/4f5887ce02ce409dbc93f830341a878e is 50, key is test_row_0/B:col10/1732778610859/Put/seqid=0 2024-11-28T07:23:31,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742450_1626 (size=12301) 2024-11-28T07:23:31,530 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/4f5887ce02ce409dbc93f830341a878e 2024-11-28T07:23:31,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778671517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778671518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778671525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778671528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778671531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5697e67e06c64cae999983847a3281b5 is 50, key is test_row_0/C:col10/1732778610859/Put/seqid=0 2024-11-28T07:23:31,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742451_1627 (size=12301) 2024-11-28T07:23:31,593 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5697e67e06c64cae999983847a3281b5 2024-11-28T07:23:31,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/5b09acf357314624891c1b2cbadeb598 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598 2024-11-28T07:23:31,602 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598, entries=150, sequenceid=391, filesize=12.0 K 2024-11-28T07:23:31,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/4f5887ce02ce409dbc93f830341a878e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e 2024-11-28T07:23:31,608 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e, entries=150, sequenceid=391, filesize=12.0 K 2024-11-28T07:23:31,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/5697e67e06c64cae999983847a3281b5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5 2024-11-28T07:23:31,614 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5, entries=150, sequenceid=391, filesize=12.0 K 2024-11-28T07:23:31,615 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for e310f48e6ef0ed637c2d62fa297701bf in 182ms, sequenceid=391, compaction requested=true 2024-11-28T07:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
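The flush that just committed files to stores A, B and C at sequenceid=391 was driven from the master by a FlushTableProcedure/FlushRegionProcedure pair (pid=148/149), which dispatches a FlushRegionCallable to the region server. A minimal sketch of triggering the same kind of table flush from a client, and of how a blocking limit like the 512.0 K seen above is commonly derived, is below. The configuration values are assumptions for illustration only (this test's actual settings are not shown in the log), and memstore sizing is a region-server setting that belongs in the server's hbase-site.xml rather than in client code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // A blocking limit such as 512.0 K is typically flush size * block multiplier.
        // These concrete numbers are illustrative assumptions, not the test's settings,
        // and would normally live in the region server's hbase-site.xml.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush threshold
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x = 512 K

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table; the master runs a flush procedure and
            // hands the per-region work to the region server, as seen for pid=148/149.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

Once the flush drains the memstore below the blocking limit, the RegionTooBusyException rejections stop and, as the following entries show, the procedure pair is reported back to the master as finished.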
2024-11-28T07:23:31,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-28T07:23:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-28T07:23:31,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-28T07:23:31,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2570 sec 2024-11-28T07:23:31,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.2640 sec 2024-11-28T07:23:31,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:31,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-28T07:23:31,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:31,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:31,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:31,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:31,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/a8755b0d97134821bf7ca5cd9987d756 is 50, key is test_row_0/A:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742452_1628 (size=14741) 2024-11-28T07:23:31,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/a8755b0d97134821bf7ca5cd9987d756 2024-11-28T07:23:31,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778671678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778671679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778671682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ce607fde247a4d6c90dee085a79f3ddb is 50, key is test_row_0/B:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:31,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778671684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778671694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742453_1629 (size=12301) 2024-11-28T07:23:31,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ce607fde247a4d6c90dee085a79f3ddb 2024-11-28T07:23:31,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a60abae6dd9543d3b1639e988ffb7ae1 is 50, key is test_row_0/C:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:31,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778671796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778671797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778671797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778671801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:31,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742454_1630 (size=12301) 2024-11-28T07:23:31,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:31,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778671808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778672007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778672007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778672007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778672010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778672022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a60abae6dd9543d3b1639e988ffb7ae1 2024-11-28T07:23:32,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/a8755b0d97134821bf7ca5cd9987d756 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756 2024-11-28T07:23:32,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756, entries=200, sequenceid=409, filesize=14.4 K 2024-11-28T07:23:32,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/ce607fde247a4d6c90dee085a79f3ddb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb 2024-11-28T07:23:32,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb, entries=150, sequenceid=409, filesize=12.0 K 2024-11-28T07:23:32,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a60abae6dd9543d3b1639e988ffb7ae1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1 2024-11-28T07:23:32,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1, entries=150, sequenceid=409, filesize=12.0 K 2024-11-28T07:23:32,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for e310f48e6ef0ed637c2d62fa297701bf in 609ms, sequenceid=409, compaction requested=true 2024-11-28T07:23:32,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:32,250 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:32,251 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54902 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:32,251 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:32,252 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,252 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/69a3c404da9641349462dbbe029d322e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=53.6 K 2024-11-28T07:23:32,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:32,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:32,252 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:32,253 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69a3c404da9641349462dbbe029d322e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609993 2024-11-28T07:23:32,253 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting bb8e56fca4f74db49c27d67a6a1e4453, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778610144 2024-11-28T07:23:32,253 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b09acf357314624891c1b2cbadeb598, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732778610844 2024-11-28T07:23:32,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:32,254 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:32,254 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,254 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/381588250f46472ebec7002739a471f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=48.8 K 2024-11-28T07:23:32,255 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8755b0d97134821bf7ca5cd9987d756, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611511 2024-11-28T07:23:32,255 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 381588250f46472ebec7002739a471f1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609993 2024-11-28T07:23:32,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 722e70918fd34ce3b837c1bb9f039821, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778610144 2024-11-28T07:23:32,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f5887ce02ce409dbc93f830341a878e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732778610844 2024-11-28T07:23:32,256 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting ce607fde247a4d6c90dee085a79f3ddb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611527 2024-11-28T07:23:32,255 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:32,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:32,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:32,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:32,274 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#538 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:32,275 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/2c8fd193bfd246a69ddd6b316ef460ae is 50, key is test_row_0/B:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:32,286 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#539 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:32,287 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/4fc197208b704092bae39b60e9c17c55 is 50, key is test_row_0/A:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:32,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:32,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:32,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:32,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35055 is added to blk_1073742455_1631 (size=13255) 2024-11-28T07:23:32,339 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/2c8fd193bfd246a69ddd6b316ef460ae as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2c8fd193bfd246a69ddd6b316ef460ae 2024-11-28T07:23:32,346 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 2c8fd193bfd246a69ddd6b316ef460ae(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:32,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:32,347 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=12, startTime=1732778612252; duration=0sec 2024-11-28T07:23:32,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:32,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:32,347 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:32,349 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:32,349 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:32,349 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
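Editor's note: the compaction decisions recorded around this point (4 eligible store files, "16 blocking", ratio-based selection by ExploringCompactionPolicy) are shaped by per-store settings. Below is a minimal, illustrative sketch that just reads those knobs from an HBase Configuration; the property keys are standard HBase settings, but the fallback values shown are the commonly documented defaults used here only as placeholders, not the values this test actually ran with.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: prints the store-compaction knobs that shape the
// "Selecting compaction from N store files ... eligible ... blocking" lines above.
public class CompactionSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);        // fewest files worth compacting
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);       // most files in one compaction
    float ratio  = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);   // matches "16 blocking" in the log
    System.out.printf("min=%d max=%d ratio=%.1f blockingStoreFiles=%d%n",
        minFiles, maxFiles, ratio, blocking);
  }
}
```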
2024-11-28T07:23:32,349 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d7316d4d41284e3bae1d09639f4c8c32, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=48.8 K 2024-11-28T07:23:32,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742456_1632 (size=13255) 2024-11-28T07:23:32,350 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d7316d4d41284e3bae1d09639f4c8c32, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778609993 2024-11-28T07:23:32,353 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e8d19ba24ea44d4f90aae8febebbea78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732778610144 2024-11-28T07:23:32,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5697e67e06c64cae999983847a3281b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1732778610844 2024-11-28T07:23:32,357 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a60abae6dd9543d3b1639e988ffb7ae1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611527 2024-11-28T07:23:32,361 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/4fc197208b704092bae39b60e9c17c55 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/4fc197208b704092bae39b60e9c17c55 2024-11-28T07:23:32,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e1c114f9f7124c42a68b5ef5d8909c58 is 50, key is test_row_0/A:col10/1732778612320/Put/seqid=0 2024-11-28T07:23:32,366 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 4fc197208b704092bae39b60e9c17c55(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
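Editor's note: the repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries in this log come from HRegion.checkResources, which rejects writes once a region's memstore grows past a blocking threshold. As commonly documented, that threshold is the region flush size multiplied by a blocking multiplier; the sketch below shows the arithmetic with hypothetical fallback values chosen only so the product matches the 512 K seen here (the test's actual table settings are not visible in this excerpt).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of how the blocking memstore limit is typically derived.
// The fallback values below are hypothetical, picked so that 128 K * 4 = 512 K,
// matching the "Over memstore limit=512.0 K" messages in this log.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;   // writes get RegionTooBusyException above this size
    System.out.printf("blocking memstore limit = %.1f K%n", blockingLimit / 1024.0);
  }
}
```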
2024-11-28T07:23:32,366 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:32,366 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=12, startTime=1732778612250; duration=0sec 2024-11-28T07:23:32,366 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:32,367 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:32,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778672359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778672360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778672360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,390 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#541 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:32,391 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/164b232ddc214a4d8a8ecb8bcdfe7112 is 50, key is test_row_0/C:col10/1732778611639/Put/seqid=0 2024-11-28T07:23:32,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778672388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778672388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742457_1633 (size=12301) 2024-11-28T07:23:32,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742458_1634 (size=13255) 2024-11-28T07:23:32,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T07:23:32,464 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-28T07:23:32,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:32,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-11-28T07:23:32,469 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:32,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:32,469 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:32,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:32,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778672489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778672490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778672490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778672499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778672501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:32,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:32,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:32,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:32,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
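Editor's note: the FlushTableProcedure/FlushRegionCallable entries above correspond to the client-requested flush recorded earlier ("Operation: FLUSH, Table Name: default:TestAcidGuarantees"). The callable fails with "Unable to complete flush ... as already flushing" because a memstore flush is already in progress, and the master retries sub-procedure pid=151 shortly afterwards, as the following entries show. For reference, such a table flush is normally requested through the Admin API; a minimal sketch, with the table name taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch: ask the master to flush every region of a table.
// Server-side this shows up as a FlushTableProcedure like pid=150 above.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));  // synchronous flush request for the table
    }
  }
}
```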
2024-11-28T07:23:32,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778672698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778672699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778672700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778672713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:32,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778672714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:32,774 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:32,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:32,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:32,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:32,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e1c114f9f7124c42a68b5ef5d8909c58 2024-11-28T07:23:32,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/15546ca9eeb1429aa4329b45c045e359 is 50, key is test_row_0/B:col10/1732778612320/Put/seqid=0 2024-11-28T07:23:32,869 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/164b232ddc214a4d8a8ecb8bcdfe7112 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/164b232ddc214a4d8a8ecb8bcdfe7112 2024-11-28T07:23:32,874 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 164b232ddc214a4d8a8ecb8bcdfe7112(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:32,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:32,874 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=12, startTime=1732778612257; duration=0sec 2024-11-28T07:23:32,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:32,874 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742459_1635 (size=12301) 2024-11-28T07:23:32,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/15546ca9eeb1429aa4329b45c045e359 2024-11-28T07:23:32,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3cf8610ca71040f4be6b8fceaf058b4f is 50, key is test_row_0/C:col10/1732778612320/Put/seqid=0 2024-11-28T07:23:32,926 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:32,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:32,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:32,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:32,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742460_1636 (size=12301) 2024-11-28T07:23:33,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778673009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778673009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778673009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778673020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778673021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:33,081 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:33,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:33,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:33,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:33,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:33,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:33,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:33,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:33,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:33,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:33,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:33,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3cf8610ca71040f4be6b8fceaf058b4f 2024-11-28T07:23:33,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e1c114f9f7124c42a68b5ef5d8909c58 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58 2024-11-28T07:23:33,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58, entries=150, sequenceid=429, filesize=12.0 K 2024-11-28T07:23:33,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/15546ca9eeb1429aa4329b45c045e359 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359 2024-11-28T07:23:33,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359, entries=150, 
sequenceid=429, filesize=12.0 K 2024-11-28T07:23:33,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3cf8610ca71040f4be6b8fceaf058b4f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f 2024-11-28T07:23:33,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f, entries=150, sequenceid=429, filesize=12.0 K 2024-11-28T07:23:33,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for e310f48e6ef0ed637c2d62fa297701bf in 1063ms, sequenceid=429, compaction requested=false 2024-11-28T07:23:33,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:33,389 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:33,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-11-28T07:23:33,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:33,390 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T07:23:33,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:33,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:33,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:33,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:33,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:33,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:33,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c5e00285b0c4bb7b91bff661dd6549a is 50, key is test_row_0/A:col10/1732778612359/Put/seqid=0 2024-11-28T07:23:33,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742461_1637 (size=12301) 2024-11-28T07:23:33,450 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c5e00285b0c4bb7b91bff661dd6549a 2024-11-28T07:23:33,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/b4771e376fa2406190971489f6cac041 is 50, key is test_row_0/B:col10/1732778612359/Put/seqid=0 2024-11-28T07:23:33,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742462_1638 (size=12301) 2024-11-28T07:23:33,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:33,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:33,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778673553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778673554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778673556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778673562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778673563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:33,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778673665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778673666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778673666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778673673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778673674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778673870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778673870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778673871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778673877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:33,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778673879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:33,902 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/b4771e376fa2406190971489f6cac041 2024-11-28T07:23:33,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d41c0054baa14c709326d29a6a5a44d7 is 50, key is test_row_0/C:col10/1732778612359/Put/seqid=0 2024-11-28T07:23:33,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742463_1639 (size=12301) 2024-11-28T07:23:34,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778674181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778674181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778674182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778674184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778674192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,360 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d41c0054baa14c709326d29a6a5a44d7 2024-11-28T07:23:34,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/8c5e00285b0c4bb7b91bff661dd6549a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a 2024-11-28T07:23:34,375 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a, entries=150, sequenceid=449, filesize=12.0 K 2024-11-28T07:23:34,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/b4771e376fa2406190971489f6cac041 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041 2024-11-28T07:23:34,382 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041, entries=150, sequenceid=449, filesize=12.0 K 2024-11-28T07:23:34,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d41c0054baa14c709326d29a6a5a44d7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7 2024-11-28T07:23:34,388 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7, entries=150, sequenceid=449, filesize=12.0 K 2024-11-28T07:23:34,389 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for e310f48e6ef0ed637c2d62fa297701bf in 998ms, sequenceid=449, compaction requested=true 2024-11-28T07:23:34,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:34,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:34,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-11-28T07:23:34,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-11-28T07:23:34,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-28T07:23:34,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9210 sec 2024-11-28T07:23:34,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.9260 sec 2024-11-28T07:23:34,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T07:23:34,582 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-28T07:23:34,584 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:34,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-11-28T07:23:34,586 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:34,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-28T07:23:34,587 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:34,587 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:34,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-28T07:23:34,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:34,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:34,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/6531aff7a5e6446da6cad314fb0ea754 is 50, key is test_row_0/A:col10/1732778614691/Put/seqid=0 2024-11-28T07:23:34,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778674721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778674727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742464_1640 (size=12301) 2024-11-28T07:23:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778674722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778674729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,740 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:34,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/6531aff7a5e6446da6cad314fb0ea754 2024-11-28T07:23:34,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-28T07:23:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:34,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:34,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:34,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778674738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/630e2d3282734db7861dcaf1092e70af is 50, key is test_row_0/B:col10/1732778614691/Put/seqid=0 2024-11-28T07:23:34,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742465_1641 (size=12301) 2024-11-28T07:23:34,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/630e2d3282734db7861dcaf1092e70af 2024-11-28T07:23:34,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b478b84fa71840a6989d67e92f04634d is 50, key is test_row_0/C:col10/1732778614691/Put/seqid=0 2024-11-28T07:23:34,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778674839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778674840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778674841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778674842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778674851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:34,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742466_1642 (size=12301) 2024-11-28T07:23:34,878 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b478b84fa71840a6989d67e92f04634d 2024-11-28T07:23:34,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/6531aff7a5e6446da6cad314fb0ea754 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754 2024-11-28T07:23:34,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754, entries=150, sequenceid=469, filesize=12.0 K 2024-11-28T07:23:34,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/630e2d3282734db7861dcaf1092e70af as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af 2024-11-28T07:23:34,888 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-28T07:23:34,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af, entries=150, sequenceid=469, filesize=12.0 K 2024-11-28T07:23:34,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b478b84fa71840a6989d67e92f04634d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d 2024-11-28T07:23:34,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:34,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-28T07:23:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:34,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
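[Illustrative aside, not part of the test output] The RegionTooBusyException entries above come from HRegion.checkResources() rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit; the flushes logged here are what eventually drain it. The sketch below shows the two standard configuration properties behind that limit, assuming the usual relationship blocking limit = flush size × block multiplier. The 128 KB value is an assumption chosen only so the product matches the 512.0 K in this log; it is not read from the test code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        // Hypothetical sketch: these properties are the standard knobs behind
        // HRegion.checkResources(). Writes are rejected with
        // RegionTooBusyException once a region's memstore exceeds
        // flush size * block multiplier.
        Configuration conf = HBaseConfiguration.create();
        // Assumed values so that 128 KB * 4 = 512 KB matches the
        // "Over memstore limit=512.0 K" seen in the log; the shipped defaults
        // (128 MB flush size, multiplier 4) are far larger.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Region memstore blocking limit (bytes): " + blockingLimit);
    }
}
```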
2024-11-28T07:23:34,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:34,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d, entries=150, sequenceid=469, filesize=12.0 K 2024-11-28T07:23:34,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for e310f48e6ef0ed637c2d62fa297701bf in 206ms, sequenceid=469, compaction requested=true 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:34,898 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:34,899 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:34,900 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:34,900 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:34,900 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:34,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:34,900 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/4fc197208b704092bae39b60e9c17c55, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=49.0 K 2024-11-28T07:23:34,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:34,900 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:34,900 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2c8fd193bfd246a69ddd6b316ef460ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=49.0 K 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c8fd193bfd246a69ddd6b316ef460ae, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611527 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fc197208b704092bae39b60e9c17c55, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611527 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1c114f9f7124c42a68b5ef5d8909c58, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, 
earliestPutTs=1732778611669 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 15546ca9eeb1429aa4329b45c045e359, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732778611669 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c5e00285b0c4bb7b91bff661dd6549a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732778612357 2024-11-28T07:23:34,901 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b4771e376fa2406190971489f6cac041, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732778612357 2024-11-28T07:23:34,902 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6531aff7a5e6446da6cad314fb0ea754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732778613550 2024-11-28T07:23:34,902 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 630e2d3282734db7861dcaf1092e70af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732778613550 2024-11-28T07:23:34,924 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#550 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:34,924 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a02fb947142f4750a3f370546888f22c is 50, key is test_row_0/B:col10/1732778614691/Put/seqid=0 2024-11-28T07:23:34,934 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#551 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:34,935 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/191e8d4d8ff74501883f971dab566cc5 is 50, key is test_row_0/A:col10/1732778614691/Put/seqid=0 2024-11-28T07:23:34,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742467_1643 (size=13391) 2024-11-28T07:23:35,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742468_1644 (size=13391) 2024-11-28T07:23:35,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:35,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-11-28T07:23:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,047 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T07:23:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
as already flushing 2024-11-28T07:23:35,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d4b69dedca5c4a1abfac95cf34f68fa3 is 50, key is test_row_0/A:col10/1732778614718/Put/seqid=0 2024-11-28T07:23:35,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742469_1645 (size=14741) 2024-11-28T07:23:35,082 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d4b69dedca5c4a1abfac95cf34f68fa3 2024-11-28T07:23:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/eb721a700ec04c27bf52daa6c825746d is 50, key is test_row_0/B:col10/1732778614718/Put/seqid=0 2024-11-28T07:23:35,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742470_1646 (size=12301) 2024-11-28T07:23:35,115 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/eb721a700ec04c27bf52daa6c825746d 2024-11-28T07:23:35,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d39e8c2a6e4845258fea7ed548f5c71d is 50, key is test_row_0/C:col10/1732778614718/Put/seqid=0 2024-11-28T07:23:35,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742471_1647 (size=12301) 2024-11-28T07:23:35,147 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=486 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d39e8c2a6e4845258fea7ed548f5c71d 2024-11-28T07:23:35,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778675092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778675094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d4b69dedca5c4a1abfac95cf34f68fa3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3 2024-11-28T07:23:35,158 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3, entries=200, sequenceid=486, filesize=14.4 K 2024-11-28T07:23:35,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/eb721a700ec04c27bf52daa6c825746d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d 2024-11-28T07:23:35,163 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d, entries=150, sequenceid=486, filesize=12.0 K 2024-11-28T07:23:35,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d39e8c2a6e4845258fea7ed548f5c71d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d 2024-11-28T07:23:35,169 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d, entries=150, sequenceid=486, filesize=12.0 K 2024-11-28T07:23:35,174 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for e310f48e6ef0ed637c2d62fa297701bf in 126ms, sequenceid=486, compaction requested=true 2024-11-28T07:23:35,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:35,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:35,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:35,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-11-28T07:23:35,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-28T07:23:35,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 589 msec 2024-11-28T07:23:35,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 594 msec 2024-11-28T07:23:35,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/de3149705edc48538a17fa54f2b2177b is 50, key is test_row_0/A:col10/1732778615173/Put/seqid=0 
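[Illustrative aside, not part of the test output] The FlushTableProcedure (pid=152) that finishes above, and the follow-up request stored as pid=154 below, correspond to client flush calls ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal client-side sketch of issuing such a flush through the standard Admin API follows; the snippet is illustrative and not taken from the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative sketch: request a flush of the test table. The master
        // stores a FlushTableProcedure and dispatches FlushRegionProcedure
        // subprocedures to the region servers, as in the pid=152/153 entries above.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Returns once the master reports the procedure done, matching the
            // "Operation: FLUSH ... completed" line in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```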
2024-11-28T07:23:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-28T07:23:35,189 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-11-28T07:23:35,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-11-28T07:23:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:35,193 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:35,194 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:35,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:35,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778675238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742472_1648 (size=14741) 2024-11-28T07:23:35,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/de3149705edc48538a17fa54f2b2177b 2024-11-28T07:23:35,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778675249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778675253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778675253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778675258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/08ffb326b9bc4d5cb5201130073471d0 is 50, key is test_row_0/B:col10/1732778615173/Put/seqid=0 2024-11-28T07:23:35,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:35,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742473_1649 (size=12301) 2024-11-28T07:23:35,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/08ffb326b9bc4d5cb5201130073471d0 2024-11-28T07:23:35,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d9f7aaf467914c64b92840ced5495861 is 50, key is test_row_0/C:col10/1732778615173/Put/seqid=0 2024-11-28T07:23:35,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:35,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-28T07:23:35,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:35,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:35,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778675355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742474_1650 (size=12301) 2024-11-28T07:23:35,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778675363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778675369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,396 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/a02fb947142f4750a3f370546888f22c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a02fb947142f4750a3f370546888f22c 2024-11-28T07:23:35,404 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into a02fb947142f4750a3f370546888f22c(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:35,404 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,404 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=12, startTime=1732778614898; duration=0sec 2024-11-28T07:23:35,405 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:35,405 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:35,405 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-28T07:23:35,409 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62459 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-28T07:23:35,410 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:35,410 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,410 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/164b232ddc214a4d8a8ecb8bcdfe7112, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=61.0 K 2024-11-28T07:23:35,411 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 164b232ddc214a4d8a8ecb8bcdfe7112, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732778611527 2024-11-28T07:23:35,411 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cf8610ca71040f4be6b8fceaf058b4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732778611669 2024-11-28T07:23:35,412 DEBUG 
[RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d41c0054baa14c709326d29a6a5a44d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732778612357 2024-11-28T07:23:35,413 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b478b84fa71840a6989d67e92f04634d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732778613550 2024-11-28T07:23:35,416 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d39e8c2a6e4845258fea7ed548f5c71d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732778614718 2024-11-28T07:23:35,419 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/191e8d4d8ff74501883f971dab566cc5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/191e8d4d8ff74501883f971dab566cc5 2024-11-28T07:23:35,426 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 191e8d4d8ff74501883f971dab566cc5(size=13.1 K), total size for store is 27.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:35,426 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,426 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=12, startTime=1732778614898; duration=0sec 2024-11-28T07:23:35,426 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:35,427 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:35,433 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#558 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:35,433 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/6de8b052746e4d3fb224e0c828499d62 is 50, key is test_row_0/C:col10/1732778614718/Put/seqid=0 2024-11-28T07:23:35,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778675469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742475_1651 (size=13425) 2024-11-28T07:23:35,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778675471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,482 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/6de8b052746e4d3fb224e0c828499d62 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/6de8b052746e4d3fb224e0c828499d62 2024-11-28T07:23:35,494 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 6de8b052746e4d3fb224e0c828499d62(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:35,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,494 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=11, startTime=1732778614898; duration=0sec 2024-11-28T07:23:35,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:35,494 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:35,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:35,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:35,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-28T07:23:35,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:35,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:35,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:35,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778675564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778675578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778675580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,652 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:35,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-28T07:23:35,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:35,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:35,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=507 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d9f7aaf467914c64b92840ced5495861 2024-11-28T07:23:35,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/de3149705edc48538a17fa54f2b2177b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b 2024-11-28T07:23:35,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778675777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b, entries=200, sequenceid=507, filesize=14.4 K 2024-11-28T07:23:35,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/08ffb326b9bc4d5cb5201130073471d0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0 2024-11-28T07:23:35,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778675780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0, entries=150, sequenceid=507, filesize=12.0 K 2024-11-28T07:23:35,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/d9f7aaf467914c64b92840ced5495861 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861 2024-11-28T07:23:35,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:35,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861, entries=150, sequenceid=507, filesize=12.0 K 2024-11-28T07:23:35,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e310f48e6ef0ed637c2d62fa297701bf in 622ms, sequenceid=507, compaction requested=true 2024-11-28T07:23:35,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,797 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:35,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:35,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:35,797 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:35,798 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:35,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:35,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:35,798 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:35,799 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:35,799 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:35,799 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:35,799 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/191e8d4d8ff74501883f971dab566cc5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=41.9 K 2024-11-28T07:23:35,799 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:35,799 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:35,799 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:35,800 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a02fb947142f4750a3f370546888f22c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.1 K 2024-11-28T07:23:35,800 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191e8d4d8ff74501883f971dab566cc5, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732778613550 2024-11-28T07:23:35,800 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a02fb947142f4750a3f370546888f22c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732778613550 2024-11-28T07:23:35,800 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4b69dedca5c4a1abfac95cf34f68fa3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732778614718 2024-11-28T07:23:35,800 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting eb721a700ec04c27bf52daa6c825746d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732778614718 2024-11-28T07:23:35,801 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting de3149705edc48538a17fa54f2b2177b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732778615091 2024-11-28T07:23:35,801 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 08ffb326b9bc4d5cb5201130073471d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732778615091 2024-11-28T07:23:35,812 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:35,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:35,813 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:35,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:35,821 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#559 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:35,821 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9602b7ef1aa146f9a85b63e02126d731 is 50, key is test_row_0/B:col10/1732778615173/Put/seqid=0 2024-11-28T07:23:35,852 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#560 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:35,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d61fbe940b4b474d8dc1484af201b0f3 is 50, key is test_row_0/A:col10/1732778615235/Put/seqid=0 2024-11-28T07:23:35,853 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f200d62b7e804f4fbdf5456e5f70fc3e is 50, key is test_row_0/A:col10/1732778615173/Put/seqid=0 2024-11-28T07:23:35,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:35,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:35,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742476_1652 (size=13493) 2024-11-28T07:23:35,900 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/9602b7ef1aa146f9a85b63e02126d731 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9602b7ef1aa146f9a85b63e02126d731 2024-11-28T07:23:35,904 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 9602b7ef1aa146f9a85b63e02126d731(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:35,904 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:35,904 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778615797; duration=0sec 2024-11-28T07:23:35,907 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:35,907 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:35,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742477_1653 (size=13493) 2024-11-28T07:23:35,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T07:23:35,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T07:23:35,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T07:23:35,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. because compaction request was cancelled 2024-11-28T07:23:35,909 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:35,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742478_1654 (size=12301) 2024-11-28T07:23:35,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778675939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778675942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:35,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:35,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778675943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778676050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778676053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778676056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778676253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778676255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778676266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778676286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778676288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:36,314 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f200d62b7e804f4fbdf5456e5f70fc3e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f200d62b7e804f4fbdf5456e5f70fc3e 2024-11-28T07:23:36,318 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into f200d62b7e804f4fbdf5456e5f70fc3e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
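The repeated RegionTooBusyException: Over memstore limit=512.0 K entries above are HRegion.checkResources rejecting writes while the region's memstore sits above its blocking threshold, i.e. the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; a 512 K ceiling would follow from, for example, a 128 KB flush size with the default multiplier of 4, though this excerpt does not show the test's actual configuration. The sketch below names those two knobs and shows a crude client-side retry; in practice the stock HBase client already retries this exception internally, so explicit handling like this is mostly relevant in tests or custom RPC paths. Values and class name are illustrative assumptions, not taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  public static void main(String[] args) throws Exception {
    // Region-server-side knobs behind the blocking threshold seen in the log:
    // writes are refused once a region's memstore exceeds
    // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    // The values below are illustrative and only take effect in the configuration
    // the region server (or a mini-cluster) is started with, not purely client-side.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);   // 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);        // -> 512 KB block limit

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                        // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;    // give up after a few tries
          Thread.sleep(100L * attempt); // back off so MemStoreFlusher can drain the region
        }
      }
    }
  }
}
```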
2024-11-28T07:23:36,318 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:36,318 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778615797; duration=0sec 2024-11-28T07:23:36,318 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:36,318 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:36,322 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=525 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d61fbe940b4b474d8dc1484af201b0f3 2024-11-28T07:23:36,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/73392b9e00d748dba1cb2d1f5e235153 is 50, key is test_row_0/B:col10/1732778615235/Put/seqid=0 2024-11-28T07:23:36,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742479_1655 (size=12301) 2024-11-28T07:23:36,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778676565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778676566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:36,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778676574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:36,790 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=525 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/73392b9e00d748dba1cb2d1f5e235153 2024-11-28T07:23:36,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/0e7eb22fc18f4ff4b0ad514df7a88c41 is 50, key is test_row_0/C:col10/1732778615235/Put/seqid=0 2024-11-28T07:23:36,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742480_1656 (size=12301) 2024-11-28T07:23:36,847 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=525 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/0e7eb22fc18f4ff4b0ad514df7a88c41 2024-11-28T07:23:36,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/d61fbe940b4b474d8dc1484af201b0f3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3 2024-11-28T07:23:36,867 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3, entries=150, sequenceid=525, filesize=12.0 K 2024-11-28T07:23:36,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/73392b9e00d748dba1cb2d1f5e235153 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153 2024-11-28T07:23:36,882 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153, entries=150, sequenceid=525, filesize=12.0 K 2024-11-28T07:23:36,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/0e7eb22fc18f4ff4b0ad514df7a88c41 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41 2024-11-28T07:23:36,888 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41, entries=150, sequenceid=525, filesize=12.0 K 2024-11-28T07:23:36,889 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e310f48e6ef0ed637c2d62fa297701bf in 1076ms, sequenceid=525, compaction requested=true 2024-11-28T07:23:36,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:36,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
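The "Finished flush of dataSize ~80.51 KB ... compaction requested=true" record above is the region-level end of the master-driven flush tracked in the following lines as FlushRegionProcedure pid=155 under FlushTableProcedure pid=154, and the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed" line further down is the client side of the same round trip. That client side is just an Admin flush call that blocks until the procedure finishes; a minimal sketch, assuming a reachable cluster, is shown here (it mirrors what the test harness's admin call does but is not the test's own code).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits for it to finish;
      // each region flushes its memstores (stores A, B and C here) to new HFiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The repeated "Checking to see if procedure is done pid=..." entries in the surrounding lines are the client polling that same procedure until the master reports it SUCCESS.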
2024-11-28T07:23:36,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-11-28T07:23:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-11-28T07:23:36,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-28T07:23:36,893 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6970 sec 2024-11-28T07:23:36,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 1.7030 sec 2024-11-28T07:23:37,079 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:23:37,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:37,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:37,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:37,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:37,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/08d8034714ea48dd83e447c337fabd07 is 50, key is test_row_0/A:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778677107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778677108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778677105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742481_1657 (size=14741) 2024-11-28T07:23:37,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778677220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778677228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778677228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T07:23:37,297 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-28T07:23:37,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778677296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778677296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-11-28T07:23:37,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:37,311 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:37,312 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:37,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:37,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:37,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778677430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778677431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778677431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:37,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:37,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:37,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/08d8034714ea48dd83e447c337fabd07 2024-11-28T07:23:37,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/56c60ad60afd4579a2d2934a0d4ca4a4 is 50, key is test_row_0/B:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742482_1658 (size=12301) 2024-11-28T07:23:37,608 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/56c60ad60afd4579a2d2934a0d4ca4a4 2024-11-28T07:23:37,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:37,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:37,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
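Editorial note (not part of the captured log): the entries above show two things happening at once. The master's FlushTableProcedure (pid=156) keeps dispatching FlushRegionProcedure (pid=157) to the region server, which answers "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing out the previous snapshot, so the master simply re-dispatches until the flush can run. Meanwhile, writes to region e310f48e6ef0ed637c2d62fa297701bf are rejected with RegionTooBusyException once its memstore reaches the 512.0 K blocking limit. Below is a minimal sketch of where such a 512 K figure usually comes from (flush size times block multiplier); the concrete values are assumptions chosen so the product matches the limit in the log, not values read from this test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes.
            // 128 K is an illustrative assumption; the production default is 128 MB.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
            // Block new writes (RegionTooBusyException) once the memstore reaches
            // flush.size * block.multiplier bytes: 128 K * 4 = 512 K, matching the log.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Blocking memstore limit (bytes): " + blockingLimit); // 524288 = 512.0 K
        }
    }

With stock defaults (128 MB flush size, multiplier 4) the blocking limit would be 512 MB, so the tiny 512 K limit seen here is evidently a deliberate test setting meant to force frequent flushes and write back-pressure.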
2024-11-28T07:23:37,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/007d2a56e55d4d4a894dd0aa015436c7 is 50, key is test_row_0/C:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742483_1659 (size=12301) 2024-11-28T07:23:37,676 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/007d2a56e55d4d4a894dd0aa015436c7 2024-11-28T07:23:37,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/08d8034714ea48dd83e447c337fabd07 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07 2024-11-28T07:23:37,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07, entries=200, sequenceid=549, filesize=14.4 K 2024-11-28T07:23:37,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/56c60ad60afd4579a2d2934a0d4ca4a4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4 2024-11-28T07:23:37,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4, entries=150, sequenceid=549, filesize=12.0 K 2024-11-28T07:23:37,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/007d2a56e55d4d4a894dd0aa015436c7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7 2024-11-28T07:23:37,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7, entries=150, sequenceid=549, filesize=12.0 K 2024-11-28T07:23:37,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for e310f48e6ef0ed637c2d62fa297701bf in 618ms, sequenceid=549, compaction requested=true 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:37,697 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:37,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:37,697 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:37,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:37,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:37,699 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,699 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f200d62b7e804f4fbdf5456e5f70fc3e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=39.6 K 2024-11-28T07:23:37,699 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:37,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f200d62b7e804f4fbdf5456e5f70fc3e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732778615091 2024-11-28T07:23:37,699 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:37,699 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
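Editorial note (not part of the captured log): once the flush at sequenceid=549 finishes, the flusher adds compaction marks for all three stores (A, B, C) and the short/long compaction threads pick up the work; the ExploringCompactionPolicy lines show it settling on the three eligible HFiles in A and B for minor compactions. The sketch below shows, in hedged form, how the same flush and a store compaction can also be requested explicitly through the Admin API; the table and family names match the log, while the hbase.hstore.compaction.min value shown is only the usual default and is not confirmed by this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndCompactExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible files before a minor compaction is considered
            // (shown with its usual default of 3, which matches the "3 eligible" lines above).
            conf.setInt("hbase.hstore.compaction.min", 3);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("TestAcidGuarantees");
                admin.flush(tn);                       // the client call behind "flush TestAcidGuarantees" in the master log
                admin.compact(tn, Bytes.toBytes("A")); // queue a minor compaction of column family A
            }
        }
    }

Requesting the compaction by hand is rarely necessary; in this run it is the flusher's "Add compact mark" records above that queue the work automatically.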
2024-11-28T07:23:37,700 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9602b7ef1aa146f9a85b63e02126d731, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.2 K 2024-11-28T07:23:37,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d61fbe940b4b474d8dc1484af201b0f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=525, earliestPutTs=1732778615217 2024-11-28T07:23:37,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08d8034714ea48dd83e447c337fabd07, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:37,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9602b7ef1aa146f9a85b63e02126d731, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732778615091 2024-11-28T07:23:37,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 73392b9e00d748dba1cb2d1f5e235153, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=525, earliestPutTs=1732778615217 2024-11-28T07:23:37,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 56c60ad60afd4579a2d2934a0d4ca4a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:37,722 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:37,723 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/34966c2e1004446f82074d2d7a175de1 is 50, key is test_row_0/B:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,742 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#568 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:37,743 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/948e11bd836341809d3ec36318598707 is 50, key is test_row_0/A:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:37,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:37,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:37,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:37,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:37,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:37,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:37,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:37,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:37,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742484_1660 (size=13595) 2024-11-28T07:23:37,790 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/34966c2e1004446f82074d2d7a175de1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/34966c2e1004446f82074d2d7a175de1 2024-11-28T07:23:37,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/824587a45c1d4d02bcdf772b449f8472 is 50, key is test_row_0/A:col10/1732778617105/Put/seqid=0 2024-11-28T07:23:37,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 34966c2e1004446f82074d2d7a175de1(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
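Editorial note (not part of the captured log): the RegionTooBusyException rejections continue below while the next flush (sequenceid=565) and the C-store compaction are still in flight, so a writer that wants to ride out this back-pressure has to retry. The sketch below is one illustrative way to do that from application code; in practice the HBase client's own retry layer usually absorbs RegionTooBusyException before it surfaces, so the explicit catch and the retry/backoff parameters here are assumptions for demonstration only, not the behaviour of the test client in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                int attempt = 0;
                while (true) {
                    try {
                        table.put(put); // rejected while the region's memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (++attempt >= 5) throw e;  // give up after a bounded number of tries (assumption)
                        Thread.sleep(200L * attempt); // simple linear backoff between attempts
                    }
                }
            }
        }
    }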
2024-11-28T07:23:37,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:37,808 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778617697; duration=0sec 2024-11-28T07:23:37,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:37,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:37,808 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:37,810 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:37,810 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:37,810 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,810 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/6de8b052746e4d3fb224e0c828499d62, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=49.1 K 2024-11-28T07:23:37,811 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 6de8b052746e4d3fb224e0c828499d62, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=486, earliestPutTs=1732778614718 2024-11-28T07:23:37,811 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d9f7aaf467914c64b92840ced5495861, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=507, earliestPutTs=1732778615091 2024-11-28T07:23:37,812 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e7eb22fc18f4ff4b0ad514df7a88c41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=525, earliestPutTs=1732778615217 2024-11-28T07:23:37,812 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 007d2a56e55d4d4a894dd0aa015436c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:37,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742485_1661 (size=13595) 2024-11-28T07:23:37,830 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/948e11bd836341809d3ec36318598707 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/948e11bd836341809d3ec36318598707 2024-11-28T07:23:37,836 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 948e11bd836341809d3ec36318598707(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:37,836 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:37,836 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778617697; duration=0sec 2024-11-28T07:23:37,836 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:37,836 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:37,845 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#570 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:37,846 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/52b055feb5cc487aa55af9ba0b67a489 is 50, key is test_row_0/C:col10/1732778617078/Put/seqid=0 2024-11-28T07:23:37,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742486_1662 (size=17181) 2024-11-28T07:23:37,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/824587a45c1d4d02bcdf772b449f8472 2024-11-28T07:23:37,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742487_1663 (size=13561) 2024-11-28T07:23:37,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/97805e5e588b4573a95f28011dc0e1aa is 50, key is test_row_0/B:col10/1732778617105/Put/seqid=0 2024-11-28T07:23:37,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778677889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778677901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,911 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:37,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778677901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:37,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:37,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:37,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:37,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:37,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:37,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742488_1664 (size=12301) 2024-11-28T07:23:37,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/97805e5e588b4573a95f28011dc0e1aa 2024-11-28T07:23:37,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 is 50, key is test_row_0/C:col10/1732778617105/Put/seqid=0 2024-11-28T07:23:37,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742489_1665 (size=12301) 2024-11-28T07:23:37,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 2024-11-28T07:23:37,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/824587a45c1d4d02bcdf772b449f8472 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472 2024-11-28T07:23:38,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472, entries=250, sequenceid=565, filesize=16.8 K 2024-11-28T07:23:38,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/97805e5e588b4573a95f28011dc0e1aa as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa 2024-11-28T07:23:38,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa, entries=150, sequenceid=565, filesize=12.0 K 2024-11-28T07:23:38,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 2024-11-28T07:23:38,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5, entries=150, sequenceid=565, filesize=12.0 K 2024-11-28T07:23:38,018 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for e310f48e6ef0ed637c2d62fa297701bf in 267ms, sequenceid=565, compaction requested=false 2024-11-28T07:23:38,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:38,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778678010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:38,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:23:38,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:38,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:38,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:38,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/95498c4b619f4b61a668a2ae1bb701e1 is 50, key is test_row_0/A:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742490_1666 (size=12301) 2024-11-28T07:23:38,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/95498c4b619f4b61a668a2ae1bb701e1 2024-11-28T07:23:38,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/2a330d1b30d748deaa1ac658d23e1d96 is 50, key is test_row_0/B:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,076 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection 
to 592d8b721726,33143,1732778474488 2024-11-28T07:23:38,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:38,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:38,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
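The RegionTooBusyException warnings above come from HRegion.checkResources(): once the region's memstore grows past its blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, which is where the 512.0 K figure comes from; the test evidently runs with a flush size far below the 128 MB default), mutations are rejected until a flush drains the memstore. The stock HBase client retries these rejections on its own, governed by hbase.client.retries.number and hbase.client.pause, so the loop below is only a hedged, self-contained sketch of the same back-off idea; the class name, cell value, retry count and sleep times are invented for illustration, while the table, row and column names come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                          // the real client already retries internally
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;                               // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);                 // back off while MemStoreFlusher catches up
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}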
2024-11-28T07:23:38,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778678082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778678081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742491_1667 (size=12301) 2024-11-28T07:23:38,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/2a330d1b30d748deaa1ac658d23e1d96 2024-11-28T07:23:38,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a1b4522a6af441b697fc003ed3ee9032 is 50, key is test_row_0/C:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742492_1668 (size=12301) 2024-11-28T07:23:38,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778678199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778678199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778678220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,232 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:38,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:38,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:38,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:38,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
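Each "NOT flushing ... as already flushing" followed by "Unable to complete flush" above is the region server declining the master's FlushRegionCallable for pid=157 because a MemStoreFlusher-driven flush of the same region is still running; the master then logs "Remote procedure failed" and re-dispatches the procedure until an attempt lands after the in-flight flush completes. For context, this is the kind of table flush a client can request through the Admin API, sketched below on the assumption that something like it sits behind this procedure (the class name is invented, and the retrying visible in the log happens inside HBase, not in the caller).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table's regions; in this run the master drives the
      // flush as a remote procedure (pid=157 here) and re-sends it whenever the region
      // server answers that a flush of the region is already in progress.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}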
2024-11-28T07:23:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,294 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/52b055feb5cc487aa55af9ba0b67a489 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/52b055feb5cc487aa55af9ba0b67a489 2024-11-28T07:23:38,299 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 52b055feb5cc487aa55af9ba0b67a489(size=13.2 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
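The long-compactions thread has just rewritten four C-store HFiles into a single 13.2 K file, 52b055feb5cc487aa55af9ba0b67a489, leaving the store at 25.3 K. When picking apart a run like this after the fact, the resulting store layout can be inspected directly with the Hadoop FileSystem API; the sketch below simply lists the family directory named in the log (the class name is invented, and no HBase internals are involved).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Store directory layout seen in the log: <rootdir>/data/<namespace>/<table>/<region>/<family>
    Path familyDir = new Path("hdfs://localhost:44329/user/jenkins/test-data/"
        + "4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/"
        + "e310f48e6ef0ed637c2d62fa297701bf/C");
    FileSystem fs = familyDir.getFileSystem(conf);
    // After the compaction above, the compacted-away inputs are archived and a single
    // ~13.2 K HFile (52b055feb5cc487aa55af9ba0b67a489) plus newer flush output should remain.
    for (FileStatus status : fs.listStatus(familyDir)) {
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}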
2024-11-28T07:23:38,299 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:38,299 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=12, startTime=1732778617697; duration=0sec 2024-11-28T07:23:38,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:38,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:38,385 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:38,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:38,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:38,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:38,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778678406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778678406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:38,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778678531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,539 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:38,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:38,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:38,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:38,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:38,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=590 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a1b4522a6af441b697fc003ed3ee9032 2024-11-28T07:23:38,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/95498c4b619f4b61a668a2ae1bb701e1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1 2024-11-28T07:23:38,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1, entries=150, sequenceid=590, filesize=12.0 K 2024-11-28T07:23:38,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/2a330d1b30d748deaa1ac658d23e1d96 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96 2024-11-28T07:23:38,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96, entries=150, sequenceid=590, filesize=12.0 K 2024-11-28T07:23:38,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a1b4522a6af441b697fc003ed3ee9032 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032 2024-11-28T07:23:38,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032, entries=150, sequenceid=590, filesize=12.0 K 2024-11-28T07:23:38,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for e310f48e6ef0ed637c2d62fa297701bf in 565ms, sequenceid=590, compaction requested=true 2024-11-28T07:23:38,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:38,588 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:38,588 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:38,588 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:38,589 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:38,589 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:38,589 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/948e11bd836341809d3ec36318598707, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=42.1 K 2024-11-28T07:23:38,589 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:38,589 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:38,589 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,589 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/34966c2e1004446f82074d2d7a175de1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.3 K 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 34966c2e1004446f82074d2d7a175de1, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 948e11bd836341809d3ec36318598707, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 97805e5e588b4573a95f28011dc0e1aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732778617105 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 824587a45c1d4d02bcdf772b449f8472, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732778617101 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 2a330d1b30d748deaa1ac658d23e1d96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:38,590 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95498c4b619f4b61a668a2ae1bb701e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:38,617 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#576 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:38,618 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f060cd7daba84854a9059e9f038dcdd6 is 50, key is test_row_0/A:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,627 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#577 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:38,628 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/121b091d7f114defa466e26c7b9aeac3 is 50, key is test_row_0/B:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742493_1669 (size=13697) 2024-11-28T07:23:38,679 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/f060cd7daba84854a9059e9f038dcdd6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f060cd7daba84854a9059e9f038dcdd6 2024-11-28T07:23:38,684 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into f060cd7daba84854a9059e9f038dcdd6(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
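The throttle.PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show the pressure-aware compaction throughput limiter at work. A minimal sketch of the related settings follows; the key names are standard HBase configuration, while the byte values are illustrative and not taken from this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputTuning {
  // Sketch: the controller scales its limit between these two bounds depending on
  // store-file pressure; the run above reports a current total limit of 50 MB/s.
  public static Configuration throughputConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    return conf;
  }
}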
2024-11-28T07:23:38,684 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:38,684 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778618587; duration=0sec 2024-11-28T07:23:38,684 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:38,684 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:38,684 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:38,685 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:38,685 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:38,685 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,685 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/52b055feb5cc487aa55af9ba0b67a489, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.3 K 2024-11-28T07:23:38,686 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52b055feb5cc487aa55af9ba0b67a489, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732778615929 2024-11-28T07:23:38,686 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8597b86a02bc4bf4b4780ec8f7d9ccd5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1732778617105 2024-11-28T07:23:38,687 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1b4522a6af441b697fc003ed3ee9032, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:38,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:35055 is added to blk_1073742494_1670 (size=13697) 2024-11-28T07:23:38,692 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:38,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-11-28T07:23:38,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:38,693 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:38,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:38,714 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#578 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:38,715 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a23959d6b90d41488cc161d3c33115df is 50, key is test_row_0/C:col10/1732778617898/Put/seqid=0 2024-11-28T07:23:38,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cc0ee53b0c5a440f9a099ae1f863d4a5 is 50, key is test_row_0/A:col10/1732778618078/Put/seqid=0 2024-11-28T07:23:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:38,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. as already flushing 2024-11-28T07:23:38,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742495_1671 (size=13663) 2024-11-28T07:23:38,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742496_1672 (size=12301) 2024-11-28T07:23:38,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 319 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778678826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778678827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,936 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 321 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778678931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:38,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:38,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778678935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55634 deadline: 1732778679039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,093 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/121b091d7f114defa466e26c7b9aeac3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/121b091d7f114defa466e26c7b9aeac3 2024-11-28T07:23:39,100 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 121b091d7f114defa466e26c7b9aeac3(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:39,100 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:39,100 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778618588; duration=0sec 2024-11-28T07:23:39,100 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:39,100 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:39,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 323 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778679139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778679141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,181 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/a23959d6b90d41488cc161d3c33115df as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a23959d6b90d41488cc161d3c33115df 2024-11-28T07:23:39,189 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into a23959d6b90d41488cc161d3c33115df(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
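The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are HRegion.checkResources rejecting writes because the region's memstore has exceeded its blocking size, i.e. the flush threshold multiplied by the block multiplier; the client-side caller then keeps retrying the mutation. A hedged sketch of the knobs involved (key names are standard; the 128 KB flush size is only a guess at how this test arrives at a 512 K blocking limit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstorePressureTuning {
  // Sketch of the server- and client-side settings behind the warnings above.
  public static Configuration pressureConf() {
    Configuration conf = HBaseConfiguration.create();
    // Writes are blocked once memstore size > flush.size * block.multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // illustrative value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client retry behaviour while the region reports it is too busy.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100); // ms between retry attempts
    return conf;
  }
}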
2024-11-28T07:23:39,189 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:39,189 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778618588; duration=0sec 2024-11-28T07:23:39,189 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:39,189 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:39,198 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cc0ee53b0c5a440f9a099ae1f863d4a5 2024-11-28T07:23:39,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/35452a3c70294cda91c6000dc7094b79 is 50, key is test_row_0/B:col10/1732778618078/Put/seqid=0 2024-11-28T07:23:39,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742497_1673 (size=12301) 2024-11-28T07:23:39,249 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/35452a3c70294cda91c6000dc7094b79 2024-11-28T07:23:39,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b420a6fc96e64ef6adc27761c9c60041 is 50, key is test_row_0/C:col10/1732778618078/Put/seqid=0 2024-11-28T07:23:39,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742498_1674 (size=12301) 2024-11-28T07:23:39,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55644 deadline: 1732778679314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,319 DEBUG [Thread-2396 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:39,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55694 deadline: 1732778679320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,326 DEBUG [Thread-2392 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., hostname=592d8b721726,33143,1732778474488, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:39,344 DEBUG [Thread-2407 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:56318 2024-11-28T07:23:39,344 DEBUG [Thread-2407 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,345 DEBUG [Thread-2403 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:56318 2024-11-28T07:23:39,345 DEBUG [Thread-2403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,346 DEBUG [Thread-2409 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:56318 2024-11-28T07:23:39,346 DEBUG [Thread-2409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,349 DEBUG [Thread-2411 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:56318 2024-11-28T07:23:39,349 DEBUG [Thread-2411 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,350 DEBUG [Thread-2405 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:56318 2024-11-28T07:23:39,350 DEBUG [Thread-2405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:39,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55666 deadline: 1732778679446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 325 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55702 deadline: 1732778679446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:39,713 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b420a6fc96e64ef6adc27761c9c60041 2024-11-28T07:23:39,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/cc0ee53b0c5a440f9a099ae1f863d4a5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5 2024-11-28T07:23:39,719 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T07:23:39,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/35452a3c70294cda91c6000dc7094b79 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79 2024-11-28T07:23:39,721 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T07:23:39,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/b420a6fc96e64ef6adc27761c9c60041 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041 2024-11-28T07:23:39,724 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T07:23:39,725 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e310f48e6ef0ed637c2d62fa297701bf in 1033ms, sequenceid=605, compaction requested=false 2024-11-28T07:23:39,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:39,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
2024-11-28T07:23:39,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-11-28T07:23:39,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-11-28T07:23:39,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-28T07:23:39,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4140 sec 2024-11-28T07:23:39,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 2.4180 sec 2024-11-28T07:23:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:39,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:23:39,949 DEBUG [Thread-2394 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:56318 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:39,949 DEBUG [Thread-2394 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:39,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:39,951 DEBUG [Thread-2398 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:56318 2024-11-28T07:23:39,951 DEBUG [Thread-2398 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:39,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/9f2c0f86301b407e9d300d434eab2a13 is 50, key is test_row_0/A:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:39,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742499_1675 (size=12301) 2024-11-28T07:23:40,051 DEBUG [Thread-2400 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:56318 2024-11-28T07:23:40,051 DEBUG [Thread-2400 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:40,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=631 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/9f2c0f86301b407e9d300d434eab2a13 2024-11-28T07:23:40,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/1ef9fc90c2104f4499e888a3e07bbdc3 is 50, key is test_row_0/B:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:40,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742500_1676 (size=12301) 2024-11-28T07:23:40,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=631 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/1ef9fc90c2104f4499e888a3e07bbdc3 2024-11-28T07:23:40,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/07417bf314a3440eb05d628dd47ad395 is 50, key is test_row_0/C:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:40,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742501_1677 (size=12301) 2024-11-28T07:23:41,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=631 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/07417bf314a3440eb05d628dd47ad395 2024-11-28T07:23:41,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/9f2c0f86301b407e9d300d434eab2a13 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13 2024-11-28T07:23:41,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13, entries=150, sequenceid=631, filesize=12.0 K 2024-11-28T07:23:41,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/1ef9fc90c2104f4499e888a3e07bbdc3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3 2024-11-28T07:23:41,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3, entries=150, 
sequenceid=631, filesize=12.0 K 2024-11-28T07:23:41,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/07417bf314a3440eb05d628dd47ad395 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395 2024-11-28T07:23:41,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395, entries=150, sequenceid=631, filesize=12.0 K 2024-11-28T07:23:41,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=13.42 KB/13740 for e310f48e6ef0ed637c2d62fa297701bf in 1237ms, sequenceid=631, compaction requested=true 2024-11-28T07:23:41,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e310f48e6ef0ed637c2d62fa297701bf:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:41,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] 
regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/B is initiating minor compaction (all files) 2024-11-28T07:23:41,186 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/A is initiating minor compaction (all files) 2024-11-28T07:23:41,186 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/B in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:41,186 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/A in TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:41,186 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/121b091d7f114defa466e26c7b9aeac3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.4 K 2024-11-28T07:23:41,186 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f060cd7daba84854a9059e9f038dcdd6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.4 K 2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 121b091d7f114defa466e26c7b9aeac3, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f060cd7daba84854a9059e9f038dcdd6, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 35452a3c70294cda91c6000dc7094b79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732778618064 2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc0ee53b0c5a440f9a099ae1f863d4a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732778618064 
2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ef9fc90c2104f4499e888a3e07bbdc3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=631, earliestPutTs=1732778618824 2024-11-28T07:23:41,187 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f2c0f86301b407e9d300d434eab2a13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=631, earliestPutTs=1732778618824 2024-11-28T07:23:41,193 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#B#compaction#586 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:41,193 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#A#compaction#585 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:41,193 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/0b4a2bf7bf204268bd0fa439188d8120 is 50, key is test_row_0/B:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:41,193 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/22cc83f06a8548c5bbf07d5b9c0014f3 is 50, key is test_row_0/A:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742502_1678 (size=13799) 2024-11-28T07:23:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742503_1679 (size=13799) 2024-11-28T07:23:41,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-11-28T07:23:41,417 INFO [Thread-2402 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-11-28T07:23:41,600 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/0b4a2bf7bf204268bd0fa439188d8120 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/0b4a2bf7bf204268bd0fa439188d8120 2024-11-28T07:23:41,600 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/22cc83f06a8548c5bbf07d5b9c0014f3 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/22cc83f06a8548c5bbf07d5b9c0014f3 2024-11-28T07:23:41,603 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/A of e310f48e6ef0ed637c2d62fa297701bf into 22cc83f06a8548c5bbf07d5b9c0014f3(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:41,603 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/B of e310f48e6ef0ed637c2d62fa297701bf into 0b4a2bf7bf204268bd0fa439188d8120(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:41,603 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/A, priority=13, startTime=1732778621186; duration=0sec 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:41,603 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/B, priority=13, startTime=1732778621186; duration=0sec 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:A 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:B 2024-11-28T07:23:41,603 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:41,604 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:41,604 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): e310f48e6ef0ed637c2d62fa297701bf/C is initiating minor compaction (all files) 2024-11-28T07:23:41,604 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e310f48e6ef0ed637c2d62fa297701bf/C in 
TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:41,604 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a23959d6b90d41488cc161d3c33115df, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp, totalSize=37.4 K 2024-11-28T07:23:41,604 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting a23959d6b90d41488cc161d3c33115df, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=590, earliestPutTs=1732778617885 2024-11-28T07:23:41,605 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b420a6fc96e64ef6adc27761c9c60041, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732778618064 2024-11-28T07:23:41,605 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07417bf314a3440eb05d628dd47ad395, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=631, earliestPutTs=1732778618824 2024-11-28T07:23:41,610 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e310f48e6ef0ed637c2d62fa297701bf#C#compaction#587 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:41,610 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/33f544c7503a4761abba638ce63f8fbf is 50, key is test_row_0/C:col10/1732778619947/Put/seqid=0 2024-11-28T07:23:41,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742504_1680 (size=13765) 2024-11-28T07:23:42,017 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/33f544c7503a4761abba638ce63f8fbf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/33f544c7503a4761abba638ce63f8fbf 2024-11-28T07:23:42,020 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e310f48e6ef0ed637c2d62fa297701bf/C of e310f48e6ef0ed637c2d62fa297701bf into 33f544c7503a4761abba638ce63f8fbf(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:42,020 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:42,021 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf., storeName=e310f48e6ef0ed637c2d62fa297701bf/C, priority=13, startTime=1732778621186; duration=0sec 2024-11-28T07:23:42,021 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:42,021 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e310f48e6ef0ed637c2d62fa297701bf:C 2024-11-28T07:23:42,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T07:23:43,326 DEBUG [Thread-2396 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:56318 2024-11-28T07:23:43,326 DEBUG [Thread-2396 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:43,352 DEBUG [Thread-2392 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:56318 2024-11-28T07:23:43,352 DEBUG [Thread-2392 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 126 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 160 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1329 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3987 rows 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1349 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4047 rows 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1337 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4011 rows 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1327 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3981 rows 2024-11-28T07:23:43,352 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1332 2024-11-28T07:23:43,353 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3996 rows 2024-11-28T07:23:43,353 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 
2024-11-28T07:23:43,353 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:56318 2024-11-28T07:23:43,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:23:43,354 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T07:23:43,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T07:23:43,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:43,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:43,358 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778623358"}]},"ts":"1732778623358"} 2024-11-28T07:23:43,359 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T07:23:43,361 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T07:23:43,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:23:43,363 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, UNASSIGN}] 2024-11-28T07:23:43,363 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, UNASSIGN 2024-11-28T07:23:43,363 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=e310f48e6ef0ed637c2d62fa297701bf, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:43,364 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:23:43,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:43,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:43,515 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:43,516 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 
2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing e310f48e6ef0ed637c2d62fa297701bf, disabling compactions & flushes 2024-11-28T07:23:43,516 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. after waiting 0 ms 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 2024-11-28T07:23:43,516 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(2837): Flushing e310f48e6ef0ed637c2d62fa297701bf 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=A 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=B 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:43,516 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e310f48e6ef0ed637c2d62fa297701bf, store=C 2024-11-28T07:23:43,517 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:43,520 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e789ba7032d14e7abf84493534caa8bd is 50, key is test_row_0/A:col10/1732778623351/Put/seqid=0 2024-11-28T07:23:43,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742505_1681 (size=9857) 2024-11-28T07:23:43,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:43,924 INFO 
[RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=641 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e789ba7032d14e7abf84493534caa8bd 2024-11-28T07:23:43,928 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e42b276346ca4005b4a925428c702130 is 50, key is test_row_0/B:col10/1732778623351/Put/seqid=0 2024-11-28T07:23:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742506_1682 (size=9857) 2024-11-28T07:23:43,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:44,332 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=641 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e42b276346ca4005b4a925428c702130 2024-11-28T07:23:44,337 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3853f232d5c444058fd5bf4ac2221928 is 50, key is test_row_0/C:col10/1732778623351/Put/seqid=0 2024-11-28T07:23:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742507_1683 (size=9857) 2024-11-28T07:23:44,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:44,740 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=641 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3853f232d5c444058fd5bf4ac2221928 2024-11-28T07:23:44,743 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/A/e789ba7032d14e7abf84493534caa8bd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e789ba7032d14e7abf84493534caa8bd 2024-11-28T07:23:44,746 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e789ba7032d14e7abf84493534caa8bd, entries=100, sequenceid=641, filesize=9.6 K 2024-11-28T07:23:44,746 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/B/e42b276346ca4005b4a925428c702130 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e42b276346ca4005b4a925428c702130 2024-11-28T07:23:44,749 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e42b276346ca4005b4a925428c702130, entries=100, sequenceid=641, filesize=9.6 K 2024-11-28T07:23:44,749 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/.tmp/C/3853f232d5c444058fd5bf4ac2221928 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3853f232d5c444058fd5bf4ac2221928 2024-11-28T07:23:44,752 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3853f232d5c444058fd5bf4ac2221928, entries=100, sequenceid=641, filesize=9.6 K 2024-11-28T07:23:44,752 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for e310f48e6ef0ed637c2d62fa297701bf in 1236ms, sequenceid=641, compaction requested=false 2024-11-28T07:23:44,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/ac3ef87817a140cebab675912479d3b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b4639a717b87410aae36970418f311ce, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f9884ebf6ba54cb793bbdc7f56b5acde, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15e861db42fb49c1a8989e2914de9065, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/c449eac2a306449c8a9fde567e4e9256, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a6953fb775104e2b88905d714ccc2327, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/69a3c404da9641349462dbbe029d322e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/4fc197208b704092bae39b60e9c17c55, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/191e8d4d8ff74501883f971dab566cc5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f200d62b7e804f4fbdf5456e5f70fc3e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/948e11bd836341809d3ec36318598707, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f060cd7daba84854a9059e9f038dcdd6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13] to archive 2024-11-28T07:23:44,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:23:44,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d718fdef598f44458a4e97498bdeb8dc 2024-11-28T07:23:44,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e977324341394668beaf9a73bfc2fdd7 2024-11-28T07:23:44,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b0a401a1a7db461786689486e1ee286d 2024-11-28T07:23:44,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/ac3ef87817a140cebab675912479d3b5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/ac3ef87817a140cebab675912479d3b5 2024-11-28T07:23:44,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/906ebfe46f8a4c038fec60c6c688b81d 2024-11-28T07:23:44,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/32c776427a2b48cd9d5c38b835845c68 2024-11-28T07:23:44,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b4639a717b87410aae36970418f311ce to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/b4639a717b87410aae36970418f311ce 2024-11-28T07:23:44,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d788da05f63247c8905ef67419983e8c 2024-11-28T07:23:44,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f4c25c9da0bc4ead8360d115c3dd4e1f 2024-11-28T07:23:44,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1857c6be4aa8451c9aa9c36e871711c0 2024-11-28T07:23:44,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f9884ebf6ba54cb793bbdc7f56b5acde to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f9884ebf6ba54cb793bbdc7f56b5acde 2024-11-28T07:23:44,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/1814c6265802486ca3ed6aa9474fca51 2024-11-28T07:23:44,765 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/194fe0b611b14f179bb3087fef53f036 2024-11-28T07:23:44,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c354bc0f8af4c7cbfec1578e07ae599 2024-11-28T07:23:44,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15e861db42fb49c1a8989e2914de9065 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15e861db42fb49c1a8989e2914de9065 2024-11-28T07:23:44,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f87a834b6fba4f99b4fccbaef7175012 2024-11-28T07:23:44,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/c449eac2a306449c8a9fde567e4e9256 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/c449eac2a306449c8a9fde567e4e9256 2024-11-28T07:23:44,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8ee555b059ef4483b703641259493d5c 2024-11-28T07:23:44,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/7e983934b86643f29559319abf8d0ad1 2024-11-28T07:23:44,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/15ee75e9aff241c7b5abf5b30f09276f 2024-11-28T07:23:44,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a6953fb775104e2b88905d714ccc2327 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a6953fb775104e2b88905d714ccc2327 2024-11-28T07:23:44,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/143f32a1717d40e78eb8de147e1a1bf4 2024-11-28T07:23:44,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/716582cd7097407490f77ae09dd27833 2024-11-28T07:23:44,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cb580e542c014fa5b45af1f69bbb7f62 2024-11-28T07:23:44,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/69a3c404da9641349462dbbe029d322e to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/69a3c404da9641349462dbbe029d322e 2024-11-28T07:23:44,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/bb8e56fca4f74db49c27d67a6a1e4453 2024-11-28T07:23:44,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/5b09acf357314624891c1b2cbadeb598 2024-11-28T07:23:44,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/a8755b0d97134821bf7ca5cd9987d756 2024-11-28T07:23:44,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/4fc197208b704092bae39b60e9c17c55 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/4fc197208b704092bae39b60e9c17c55 2024-11-28T07:23:44,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e1c114f9f7124c42a68b5ef5d8909c58 2024-11-28T07:23:44,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/8c5e00285b0c4bb7b91bff661dd6549a 2024-11-28T07:23:44,779 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/191e8d4d8ff74501883f971dab566cc5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/191e8d4d8ff74501883f971dab566cc5 2024-11-28T07:23:44,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/6531aff7a5e6446da6cad314fb0ea754 2024-11-28T07:23:44,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d4b69dedca5c4a1abfac95cf34f68fa3 2024-11-28T07:23:44,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/de3149705edc48538a17fa54f2b2177b 2024-11-28T07:23:44,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f200d62b7e804f4fbdf5456e5f70fc3e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f200d62b7e804f4fbdf5456e5f70fc3e 2024-11-28T07:23:44,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/d61fbe940b4b474d8dc1484af201b0f3 2024-11-28T07:23:44,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/08d8034714ea48dd83e447c337fabd07 2024-11-28T07:23:44,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/948e11bd836341809d3ec36318598707 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/948e11bd836341809d3ec36318598707 2024-11-28T07:23:44,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/824587a45c1d4d02bcdf772b449f8472 2024-11-28T07:23:44,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f060cd7daba84854a9059e9f038dcdd6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/f060cd7daba84854a9059e9f038dcdd6 2024-11-28T07:23:44,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/95498c4b619f4b61a668a2ae1bb701e1 2024-11-28T07:23:44,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/cc0ee53b0c5a440f9a099ae1f863d4a5 2024-11-28T07:23:44,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/9f2c0f86301b407e9d300d434eab2a13 2024-11-28T07:23:44,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/664d9b04452342dc8885e2bbf035979b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a46801e1820b45daaee63a8d826cda52, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/312a95afa4c24de0b9f60ea7a93e5955, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9a9beb6b8d9d4351b27446f60c5b21e3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/47df76a3f2c844debe8e9c9ba55ea2a4, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/8888da23e54e4ad39110166ed865190b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/381588250f46472ebec7002739a471f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2c8fd193bfd246a69ddd6b316ef460ae, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a02fb947142f4750a3f370546888f22c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9602b7ef1aa146f9a85b63e02126d731, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/34966c2e1004446f82074d2d7a175de1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/121b091d7f114defa466e26c7b9aeac3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3] to archive 2024-11-28T07:23:44,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:23:44,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ca5d0ba68b664462ab7cebe966d601f5 2024-11-28T07:23:44,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a3c02d4f4b1f4ac49d67e406d7e206b5 2024-11-28T07:23:44,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/664d9b04452342dc8885e2bbf035979b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/664d9b04452342dc8885e2bbf035979b 2024-11-28T07:23:44,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9055bcab639d43f9b87eea227f55969a 2024-11-28T07:23:44,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/917a1269e14a4bb3bc5db0fcaac475b7 2024-11-28T07:23:44,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a46801e1820b45daaee63a8d826cda52 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a46801e1820b45daaee63a8d826cda52 2024-11-28T07:23:44,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/3449d53d057c44aaa4f1804ee19ca668 2024-11-28T07:23:44,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e6ce7e583ba049188dcce0f501be9c6e 2024-11-28T07:23:44,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/86f29501a2a5406ba5fc3a7b574f065a 2024-11-28T07:23:44,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/312a95afa4c24de0b9f60ea7a93e5955 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/312a95afa4c24de0b9f60ea7a93e5955 2024-11-28T07:23:44,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/39c44b60b0d44cd5b850e61ad6d88bd7 2024-11-28T07:23:44,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/c50e787c2af9438b990440f8ed1ad9ee 2024-11-28T07:23:44,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/f7efc4f5bd204e3b82813442ba3657ae 2024-11-28T07:23:44,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9a9beb6b8d9d4351b27446f60c5b21e3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9a9beb6b8d9d4351b27446f60c5b21e3 2024-11-28T07:23:44,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/44bab056680d4026bdb5ef47d1d8645c 2024-11-28T07:23:44,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/d2949e8e6e074457bad3407f95d18f77 2024-11-28T07:23:44,803 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/47df76a3f2c844debe8e9c9ba55ea2a4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/47df76a3f2c844debe8e9c9ba55ea2a4 2024-11-28T07:23:44,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e958e64433bd4b2c8593ea0c4aed84c4 2024-11-28T07:23:44,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/5e6537cbaf184d6980956b602841d41c 2024-11-28T07:23:44,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/8888da23e54e4ad39110166ed865190b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/8888da23e54e4ad39110166ed865190b 2024-11-28T07:23:44,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9dbbba67f441417e936d2ebc75602cc3 2024-11-28T07:23:44,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e028d4d7b8e241d6bbe96cf6d32dc131 2024-11-28T07:23:44,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/7c1b6f2fd4cf4dadbddc61d529ae21b6 2024-11-28T07:23:44,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/381588250f46472ebec7002739a471f1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/381588250f46472ebec7002739a471f1 2024-11-28T07:23:44,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/41c1371e65d14a778d7a3463c79c1034 2024-11-28T07:23:44,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/722e70918fd34ce3b837c1bb9f039821 2024-11-28T07:23:44,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/4f5887ce02ce409dbc93f830341a878e 2024-11-28T07:23:44,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2c8fd193bfd246a69ddd6b316ef460ae to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2c8fd193bfd246a69ddd6b316ef460ae 2024-11-28T07:23:44,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/ce607fde247a4d6c90dee085a79f3ddb 2024-11-28T07:23:44,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/15546ca9eeb1429aa4329b45c045e359 2024-11-28T07:23:44,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/b4771e376fa2406190971489f6cac041 2024-11-28T07:23:44,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a02fb947142f4750a3f370546888f22c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/a02fb947142f4750a3f370546888f22c 2024-11-28T07:23:44,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/630e2d3282734db7861dcaf1092e70af 2024-11-28T07:23:44,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/eb721a700ec04c27bf52daa6c825746d 2024-11-28T07:23:44,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9602b7ef1aa146f9a85b63e02126d731 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/9602b7ef1aa146f9a85b63e02126d731 2024-11-28T07:23:44,816 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/08ffb326b9bc4d5cb5201130073471d0 2024-11-28T07:23:44,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/73392b9e00d748dba1cb2d1f5e235153 2024-11-28T07:23:44,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/34966c2e1004446f82074d2d7a175de1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/34966c2e1004446f82074d2d7a175de1 2024-11-28T07:23:44,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/56c60ad60afd4579a2d2934a0d4ca4a4 2024-11-28T07:23:44,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/97805e5e588b4573a95f28011dc0e1aa 2024-11-28T07:23:44,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/121b091d7f114defa466e26c7b9aeac3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/121b091d7f114defa466e26c7b9aeac3 2024-11-28T07:23:44,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/2a330d1b30d748deaa1ac658d23e1d96 2024-11-28T07:23:44,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/35452a3c70294cda91c6000dc7094b79 2024-11-28T07:23:44,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/1ef9fc90c2104f4499e888a3e07bbdc3 2024-11-28T07:23:44,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1e4e5eb633454ed89199a25535de0082, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/eed3ec97cea74d37bd462264f8b5e3d1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d852f14bd80e4a1ab82abc801a18daaf, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/85d6f97be55a4d658f15ba6f1860dd70, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8d7f0bcd5b8e43ba8799e2cb1112114e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/dc78d1211ed340d588a9462408220cb6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d7316d4d41284e3bae1d09639f4c8c32, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/164b232ddc214a4d8a8ecb8bcdfe7112, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/6de8b052746e4d3fb224e0c828499d62, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/52b055feb5cc487aa55af9ba0b67a489, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a23959d6b90d41488cc161d3c33115df, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395] to archive 2024-11-28T07:23:44,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
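[Note] The entries above and below record the store closer moving each compacted store file from the region's data directory to the mirrored path under archive/ on the same filesystem (data/default/TestAcidGuarantees/<region>/<family>/<hfile> becomes archive/data/default/TestAcidGuarantees/<region>/<family>/<hfile>). As a rough sketch only, and not HBase's actual backup.HFileArchiver code, the class ArchiveSketch, the method archiveCompactedFile and the command-line handling below are hypothetical; the move itself is expressed with the plain Hadoop FileSystem API:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
        // Move one store file to the archive location that mirrors its data-directory layout.
        static void archiveCompactedFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
            // e.g. rootDir   = hdfs://localhost:44329/user/jenkins/test-data/<run-id>
            //      storeFile = <rootDir>/data/default/TestAcidGuarantees/<region>/C/<hfile>
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length()); // "/data/default/.../C/<hfile>"
            Path archived = new Path(rootDir, "archive" + relative); // mirrored path under archive/
            fs.mkdirs(archived.getParent());                         // ensure archive/<...>/<family> exists
            if (!fs.rename(storeFile, archived)) {                   // HDFS rename is the cheap "move"
                throw new IOException("Failed to archive " + storeFile + " to " + archived);
            }
        }

        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            Path rootDir = new Path(args[0]);                        // filesystem root of the test data dir
            FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
            for (int i = 1; i < args.length; i++) {
                archiveCompactedFile(fs, rootDir, new Path(args[i]));
            }
        }
    }

The sketch only illustrates the path mapping visible in these DEBUG lines; the real archiver additionally handles name collisions, retries, and deletion fallbacks that are not shown here.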
2024-11-28T07:23:44,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e92917998fad4d088abc2dcdb2d9bca8 2024-11-28T07:23:44,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2bbfc000e8254be98fad63aabd6d5491 2024-11-28T07:23:44,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1e4e5eb633454ed89199a25535de0082 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1e4e5eb633454ed89199a25535de0082 2024-11-28T07:23:44,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/2f42b8d601744ec790399bdf3d3064f1 2024-11-28T07:23:44,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/faed1a3445db4e5db43a4dd5bf0ebf56 2024-11-28T07:23:44,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/eed3ec97cea74d37bd462264f8b5e3d1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/eed3ec97cea74d37bd462264f8b5e3d1 2024-11-28T07:23:44,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/24a1164fee154546a0d297017b812d48 2024-11-28T07:23:44,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/fabda253d29b40b08ac83e747763eab1 2024-11-28T07:23:44,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/82349ec0d854476a8508a347fd7cf9da 2024-11-28T07:23:44,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d852f14bd80e4a1ab82abc801a18daaf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d852f14bd80e4a1ab82abc801a18daaf 2024-11-28T07:23:44,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/aa418ae16cac47dbb95e4f37a1c0beb2 2024-11-28T07:23:44,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cdeb664d776d41a3bf624792bec53e78 2024-11-28T07:23:44,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1ef21a1c434c4420a2bffaa6869d2d37 2024-11-28T07:23:44,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/85d6f97be55a4d658f15ba6f1860dd70 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/85d6f97be55a4d658f15ba6f1860dd70 2024-11-28T07:23:44,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5b10ea1bc6324c5892d8fbba46260086 2024-11-28T07:23:44,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/18e9c20b1f10419f8a4c332b3aad5774 2024-11-28T07:23:44,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8d7f0bcd5b8e43ba8799e2cb1112114e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8d7f0bcd5b8e43ba8799e2cb1112114e 2024-11-28T07:23:44,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/cb015ec5fee042e38083b441cd4dbe7a 2024-11-28T07:23:44,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b943d4620e4b4a42ba42c98e1c3efb4b 2024-11-28T07:23:44,837 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/dc78d1211ed340d588a9462408220cb6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/dc78d1211ed340d588a9462408220cb6 2024-11-28T07:23:44,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/571849bac85d4791ae9d57ce2021dd41 2024-11-28T07:23:44,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/1923aa2162644f638f00d08c2f20afb3 2024-11-28T07:23:44,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/4060a04976774d129e8a571a1f2b6b2d 2024-11-28T07:23:44,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d7316d4d41284e3bae1d09639f4c8c32 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d7316d4d41284e3bae1d09639f4c8c32 2024-11-28T07:23:44,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/ff146b548c274b60985a43a47215e6f7 2024-11-28T07:23:44,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/e8d19ba24ea44d4f90aae8febebbea78 2024-11-28T07:23:44,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/5697e67e06c64cae999983847a3281b5 2024-11-28T07:23:44,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/164b232ddc214a4d8a8ecb8bcdfe7112 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/164b232ddc214a4d8a8ecb8bcdfe7112 2024-11-28T07:23:44,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a60abae6dd9543d3b1639e988ffb7ae1 2024-11-28T07:23:44,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3cf8610ca71040f4be6b8fceaf058b4f 2024-11-28T07:23:44,845 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d41c0054baa14c709326d29a6a5a44d7 2024-11-28T07:23:44,846 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b478b84fa71840a6989d67e92f04634d 2024-11-28T07:23:44,847 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/6de8b052746e4d3fb224e0c828499d62 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/6de8b052746e4d3fb224e0c828499d62 2024-11-28T07:23:44,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d39e8c2a6e4845258fea7ed548f5c71d 2024-11-28T07:23:44,848 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/d9f7aaf467914c64b92840ced5495861 2024-11-28T07:23:44,849 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/0e7eb22fc18f4ff4b0ad514df7a88c41 2024-11-28T07:23:44,850 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/52b055feb5cc487aa55af9ba0b67a489 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/52b055feb5cc487aa55af9ba0b67a489 2024-11-28T07:23:44,851 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/007d2a56e55d4d4a894dd0aa015436c7 2024-11-28T07:23:44,851 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/8597b86a02bc4bf4b4780ec8f7d9ccd5 2024-11-28T07:23:44,852 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a23959d6b90d41488cc161d3c33115df to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a23959d6b90d41488cc161d3c33115df 2024-11-28T07:23:44,853 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/a1b4522a6af441b697fc003ed3ee9032 2024-11-28T07:23:44,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/b420a6fc96e64ef6adc27761c9c60041 2024-11-28T07:23:44,854 DEBUG [StoreCloser-TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/07417bf314a3440eb05d628dd47ad395 2024-11-28T07:23:44,857 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/recovered.edits/644.seqid, newMaxSeqId=644, maxSeqId=1 2024-11-28T07:23:44,858 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf. 
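Note on the StoreCloser entries above: each compacted HFile under the region's data directory is moved to the mirrored path under archive/ before the store is closed. The helper below is a hypothetical sketch of that mirrored-path move using the Hadoop FileSystem API only; it is not the actual org.apache.hadoop.hbase.backup.HFileArchiver implementation referenced in the log, which additionally handles name collisions, retries, and bulk moves.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveMoveSketch {
  // Hypothetical helper: given the cluster root dir (the ".../test-data/<id>" prefix in the log)
  // and a store file under "<root>/data/...", compute the mirrored "<root>/archive/data/..."
  // location, matching the source/destination pairs recorded above.
  static Path archivePathFor(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();              // e.g. /user/jenkins/test-data/<id>
    String file = storeFile.toUri().getPath();            // e.g. <root>/data/default/TestAcidGuarantees/<region>/C/<hfile>
    String relative = file.substring(root.length() + 1);  // "data/default/TestAcidGuarantees/<region>/C/<hfile>"
    return new Path(new Path(rootDir, "archive"), relative);
  }

  // Move one compacted store file into the archive, creating parent directories first.
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path target = archivePathFor(rootDir, storeFile);
    fs.mkdirs(target.getParent());
    if (!fs.rename(storeFile, target)) {
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}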
2024-11-28T07:23:44,858 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for e310f48e6ef0ed637c2d62fa297701bf: 2024-11-28T07:23:44,859 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:44,859 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=e310f48e6ef0ed637c2d62fa297701bf, regionState=CLOSED 2024-11-28T07:23:44,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-28T07:23:44,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure e310f48e6ef0ed637c2d62fa297701bf, server=592d8b721726,33143,1732778474488 in 1.4960 sec 2024-11-28T07:23:44,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-28T07:23:44,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e310f48e6ef0ed637c2d62fa297701bf, UNASSIGN in 1.4980 sec 2024-11-28T07:23:44,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-28T07:23:44,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5000 sec 2024-11-28T07:23:44,864 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778624864"}]},"ts":"1732778624864"} 2024-11-28T07:23:44,865 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:23:44,867 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:23:44,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5120 sec 2024-11-28T07:23:45,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T07:23:45,461 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-11-28T07:23:45,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:23:45,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,463 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=162, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-11-28T07:23:45,464 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=162, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,465 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:45,467 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/recovered.edits] 2024-11-28T07:23:45,469 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/22cc83f06a8548c5bbf07d5b9c0014f3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/22cc83f06a8548c5bbf07d5b9c0014f3 2024-11-28T07:23:45,470 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e789ba7032d14e7abf84493534caa8bd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/A/e789ba7032d14e7abf84493534caa8bd 2024-11-28T07:23:45,472 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/0b4a2bf7bf204268bd0fa439188d8120 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/0b4a2bf7bf204268bd0fa439188d8120 2024-11-28T07:23:45,473 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e42b276346ca4005b4a925428c702130 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/B/e42b276346ca4005b4a925428c702130 2024-11-28T07:23:45,474 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/33f544c7503a4761abba638ce63f8fbf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/33f544c7503a4761abba638ce63f8fbf 
2024-11-28T07:23:45,475 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3853f232d5c444058fd5bf4ac2221928 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/C/3853f232d5c444058fd5bf4ac2221928 2024-11-28T07:23:45,477 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/recovered.edits/644.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf/recovered.edits/644.seqid 2024-11-28T07:23:45,478 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/e310f48e6ef0ed637c2d62fa297701bf 2024-11-28T07:23:45,478 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:23:45,480 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=162, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,481 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:23:45,482 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:23:45,483 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=162, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,483 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T07:23:45,483 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778625483"}]},"ts":"9223372036854775807"} 2024-11-28T07:23:45,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:23:45,485 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e310f48e6ef0ed637c2d62fa297701bf, NAME => 'TestAcidGuarantees,,1732778597081.e310f48e6ef0ed637c2d62fa297701bf.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:23:45,485 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-28T07:23:45,485 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778625485"}]},"ts":"9223372036854775807"} 2024-11-28T07:23:45,486 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:23:45,488 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=162, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 26 msec 2024-11-28T07:23:45,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-11-28T07:23:45,564 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 162 completed 2024-11-28T07:23:45,575 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=242 (was 238) - Thread LEAK? -, OpenFileDescriptor=458 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=650 (was 640) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4311 (was 4380) 2024-11-28T07:23:45,584 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=242, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=650, ProcessCount=11, AvailableMemoryMB=4310 2024-11-28T07:23:45,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
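The HBaseAdmin$TableFuture entries above report the DISABLE (procId 158) and DELETE (procId 162) operations on default:TestAcidGuarantees completing before the next test case starts. A minimal client-side sketch of issuing that same sequence through the HBase Admin API is shown below; it is illustrative only and not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // master runs DisableTableProcedure, as logged above
        }
        admin.deleteTable(table);      // DeleteTableProcedure archives the region dirs
      }
    }
  }
}

Both calls block until the corresponding master procedure finishes, which is why the log pairs each "Checking to see if procedure is done" poll with an "Operation: ... completed" line.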
2024-11-28T07:23:45,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:23:45,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:45,587 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T07:23:45,587 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:45,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 163 2024-11-28T07:23:45,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:45,588 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T07:23:45,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742508_1684 (size=960) 2024-11-28T07:23:45,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:45,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:45,994 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e 2024-11-28T07:23:45,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742509_1685 (size=53) 2024-11-28T07:23:46,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a6b84436e6ee345d2d4f94cd524e48a2, disabling compactions & flushes 2024-11-28T07:23:46,399 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. after waiting 0 ms 2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:46,399 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:46,399 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:46,400 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T07:23:46,401 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732778626400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732778626400"}]},"ts":"1732778626400"} 2024-11-28T07:23:46,401 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T07:23:46,402 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T07:23:46,402 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778626402"}]},"ts":"1732778626402"} 2024-11-28T07:23:46,403 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T07:23:46,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, ASSIGN}] 2024-11-28T07:23:46,408 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, ASSIGN 2024-11-28T07:23:46,409 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, ASSIGN; state=OFFLINE, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=false 2024-11-28T07:23:46,559 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:46,560 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:46,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:46,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:46,714 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:46,714 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:23:46,714 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,714 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:23:46,714 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,714 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,715 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,716 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:46,717 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName A 2024-11-28T07:23:46,717 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:46,717 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:46,717 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,718 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:46,718 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName B 2024-11-28T07:23:46,718 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:46,718 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:46,718 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,719 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:46,719 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName C 2024-11-28T07:23:46,719 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:46,719 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:46,719 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:46,720 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,720 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,721 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:23:46,722 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:46,723 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T07:23:46,723 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened a6b84436e6ee345d2d4f94cd524e48a2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65904821, jitterRate=-0.017941638827323914}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:23:46,724 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:46,724 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., pid=165, masterSystemTime=1732778626711 2024-11-28T07:23:46,725 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:46,725 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:46,726 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=OPEN, openSeqNum=2, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:46,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-11-28T07:23:46,727 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 in 166 msec 2024-11-28T07:23:46,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-28T07:23:46,728 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, ASSIGN in 320 msec 2024-11-28T07:23:46,729 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T07:23:46,729 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778626729"}]},"ts":"1732778626729"} 2024-11-28T07:23:46,729 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T07:23:46,732 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=163, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T07:23:46,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-11-28T07:23:47,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T07:23:47,691 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-28T07:23:47,692 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-11-28T07:23:47,700 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:47,701 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:47,702 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:47,703 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T07:23:47,704 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54088, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T07:23:47,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T07:23:47,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T07:23:47,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=166, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T07:23:47,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742510_1686 (size=996) 2024-11-28T07:23:48,114 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T07:23:48,115 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T07:23:48,116 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:23:48,118 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, REOPEN/MOVE}] 2024-11-28T07:23:48,118 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, REOPEN/MOVE 2024-11-28T07:23:48,119 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,120 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:23:48,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE; CloseRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:48,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:48,271 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(124): Close a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,271 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:23:48,271 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1681): Closing a6b84436e6ee345d2d4f94cd524e48a2, disabling compactions & flushes 2024-11-28T07:23:48,271 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,271 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,271 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. after waiting 0 ms 2024-11-28T07:23:48,271 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
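Note on the modify request logged at 07:23:47,705: comparing the "from" and "to" descriptors, the only substantive change is that column family A gains IS_MOB => 'true' with MOB_THRESHOLD => '4', so any cell in A whose value exceeds 4 bytes is written through the MOB path rather than into a regular StoreFile. The TableDescriptorChecker warning about MEMSTORE_FLUSHSIZE (131072, i.e. 128 KB) flags a value the test presumably keeps deliberately small to force frequent flushes. Because descriptor changes only apply to reopened regions, ModifyTableProcedure (pid=166) schedules the ReopenTableRegionsProcedure / close / reopen sequence running here. A hedged sketch of a client-side call that would produce this descriptor change; the connection setup and builder usage are illustrative, not taken from the test source:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(4L)  // MOB_THRESHOLD => '4': values larger than 4 bytes go to MOB files
              .build();
          // Stores a ModifyTableProcedure on the master (pid=166 in this run), which then
          // schedules the ReopenTableRegionsProcedure / close / reopen seen in the log.
          admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build());
        }
      }
    }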
2024-11-28T07:23:48,274 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T07:23:48,275 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,275 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1635): Region close journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:48,275 WARN [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegionServer(3786): Not adding moved region record: a6b84436e6ee345d2d4f94cd524e48a2 to self. 2024-11-28T07:23:48,276 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(170): Closed a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,276 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=CLOSED 2024-11-28T07:23:48,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-11-28T07:23:48,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; CloseRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 in 157 msec 2024-11-28T07:23:48,278 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=168, ppid=167, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, REOPEN/MOVE; state=CLOSED, location=592d8b721726,33143,1732778474488; forceNewPlan=false, retain=true 2024-11-28T07:23:48,429 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=OPENING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,430 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=168, state=RUNNABLE; OpenRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:23:48,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:48,584 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
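Note on the close/reassign above: the close writes recovered.edits/4.seqid, and the subsequent TransitRegionStateProcedure runs with retain=true, so the master prefers to put the region back on the regionserver it was closed on (592d8b721726,33143 in this run); "Not adding moved region record ... to self" is the regionserver-side reflection of the same decision. A purely illustrative sketch of how a client could check the post-reopen location; the empty row key works here only because the single region spans the whole key space:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WhereIsMyRegion {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          // reload=true bypasses the client-side location cache so the post-reopen assignment is visible.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println(loc); // prints the region and the server currently hosting it
        }
      }
    }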
2024-11-28T07:23:48,584 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7285): Opening region: {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} 2024-11-28T07:23:48,584 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,584 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T07:23:48,584 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7327): checking encryption for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,584 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(7330): checking classloading for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,585 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,586 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:48,586 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName A 2024-11-28T07:23:48,587 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:48,587 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:48,587 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,588 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:48,588 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName B 2024-11-28T07:23:48,588 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:48,588 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:48,589 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,589 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T07:23:48,589 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6b84436e6ee345d2d4f94cd524e48a2 columnFamilyName C 2024-11-28T07:23:48,589 DEBUG [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:48,589 INFO [StoreOpener-a6b84436e6ee345d2d4f94cd524e48a2-1 {}] regionserver.HStore(327): Store=a6b84436e6ee345d2d4f94cd524e48a2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T07:23:48,590 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,590 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,591 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,592 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T07:23:48,593 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1085): writing seq id for a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,593 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1102): Opened a6b84436e6ee345d2d4f94cd524e48a2; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62212208, jitterRate=-0.07296586036682129}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T07:23:48,594 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegion(1001): Region open journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:48,594 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., pid=170, masterSystemTime=1732778628581 2024-11-28T07:23:48,595 DEBUG [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,595 INFO [RS_OPEN_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_OPEN_REGION, pid=170}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
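Note on the store opens above: each of the three stores (A, B, C) comes back with a CompactingMemStore in BASIC mode (in-memory flush size threshold 2.00 MB, pipelineThreshold=2) because the descriptor carries 'hbase.hregion.compacting.memstore.type' => 'BASIC'; the CompactionConfiguration line describes the store-file compaction settings (exploring policy, ratios, file counts), which are separate from the in-memory compaction pipeline. A sketch of the two usual ways to request BASIC in-memory compaction; the family name and the per-family variant are illustrative assumptions, and the snippet only builds a descriptor, so it needs no running cluster:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactingMemStoreConfig {
      public static void main(String[] args) {
        // Table-level attribute, exactly as it appears in the descriptor logged above.
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");

        // Equivalent per-family form (family name 'A' is just an example).
        ColumnFamilyDescriptor a = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();

        TableDescriptor td = builder.setColumnFamily(a).build();
        System.out.println(td); // only builds and prints the descriptor
      }
    }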
2024-11-28T07:23:48,596 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=168 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=OPEN, openSeqNum=5, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=168 2024-11-28T07:23:48,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=168, state=SUCCESS; OpenRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 in 166 msec 2024-11-28T07:23:48,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-28T07:23:48,598 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, REOPEN/MOVE in 479 msec 2024-11-28T07:23:48,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-11-28T07:23:48,599 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-11-28T07:23:48,601 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-11-28T07:23:48,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=166 2024-11-28T07:23:48,603 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-11-28T07:23:48,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,607 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-11-28T07:23:48,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-11-28T07:23:48,614 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,615 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 
to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-11-28T07:23:48,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,618 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-11-28T07:23:48,627 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,628 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-11-28T07:23:48,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,640 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-11-28T07:23:48,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-11-28T07:23:48,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-11-28T07:23:48,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,678 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:56318 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-11-28T07:23:48,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T07:23:48,686 DEBUG [hconnection-0x18c633cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,686 DEBUG [hconnection-0x10c74bef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,687 DEBUG [hconnection-0x64af4da5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,687 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,687 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36994, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,688 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-28T07:23:48,690 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T07:23:48,690 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:48,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:48,704 DEBUG [hconnection-0x878eb9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,704 DEBUG [hconnection-0x1f4c7954-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,705 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37028, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,708 DEBUG [hconnection-0x2fe12d51-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,709 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,709 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,723 DEBUG [hconnection-0x4e6a9853-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,724 DEBUG [hconnection-0x4e6c48b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,724 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,726 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,727 DEBUG [hconnection-0x52b7fd13-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,728 DEBUG [hconnection-0x52e8d950-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T07:23:48,728 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,729 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T07:23:48,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:48,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:48,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778688758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778688759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778688759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778688761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778688761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288c6c27b18f5e4a2ebb4493a9c062f877_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778628732/Put/seqid=0 2024-11-28T07:23:48,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742511_1687 (size=12154) 2024-11-28T07:23:48,777 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:48,781 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288c6c27b18f5e4a2ebb4493a9c062f877_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288c6c27b18f5e4a2ebb4493a9c062f877_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:48,782 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31e636694cee417690f25a3be92cc005, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:48,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31e636694cee417690f25a3be92cc005 is 175, key is test_row_0/A:col10/1732778628732/Put/seqid=0 2024-11-28T07:23:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742512_1688 (size=30955) 2024-11-28T07:23:48,789 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31e636694cee417690f25a3be92cc005 2024-11-28T07:23:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T07:23:48,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/a5ae23e52aa54e08b4553fa30d6de102 is 50, key is test_row_0/B:col10/1732778628732/Put/seqid=0 2024-11-28T07:23:48,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742513_1689 (size=12001) 2024-11-28T07:23:48,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:48,842 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-28T07:23:48,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:48,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:48,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
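Note on the flush and the RegionTooBusyException storm above: the flush started at 07:23:48,734 writes family A through the MOB path (the d41d8cd98f00... file under mobdir) because of the MOB_THRESHOLD just applied, while concurrent writers are rejected with "Over memstore limit=512.0 K". That 512 K matches the 131072-byte MEMSTORE_FLUSHSIZE from the earlier warning multiplied by the default hbase.hregion.memstore.block.multiplier of 4, so puts are blocked until the in-flight flush drains the memstore. The master-driven FlushRegionProcedure (pid=172) also fails with "Unable to complete flush ... as already flushing" and is re-dispatched (it runs again at 07:23:48,999). The HBase client normally retries RegionTooBusyException on its own; the sketch below only makes that behaviour explicit with a manual backoff loop, with row, family and qualifier taken loosely from the keys visible in this log and the value and retry count chosen arbitrarily:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyAwareWriter {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (IOException e) {
              // Typically a RegionTooBusyException (possibly wrapped by the client's retry
              // machinery): the memstore is above flush.size * block.multiplier, 512 K here.
              // Back off and let the in-flight flush drain it before trying again.
              Thread.sleep(100L << attempt);
            }
          }
        }
      }
    }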
2024-11-28T07:23:48,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:48,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:48,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778688862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778688862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778688862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:48,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778688864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778688864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:48,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T07:23:48,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:48,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-28T07:23:48,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:49,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778689069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778689069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778689069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778689069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778689069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:49,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-28T07:23:49,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:49,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:49,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/a5ae23e52aa54e08b4553fa30d6de102 2024-11-28T07:23:49,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b3330bcc900049f682ba3ab9d5bf4cd1 is 50, key is test_row_0/C:col10/1732778628732/Put/seqid=0 2024-11-28T07:23:49,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742514_1690 (size=12001) 2024-11-28T07:23:49,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b3330bcc900049f682ba3ab9d5bf4cd1 2024-11-28T07:23:49,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31e636694cee417690f25a3be92cc005 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005 2024-11-28T07:23:49,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005, entries=150, sequenceid=17, filesize=30.2 K 2024-11-28T07:23:49,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/a5ae23e52aa54e08b4553fa30d6de102 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102 2024-11-28T07:23:49,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:23:49,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b3330bcc900049f682ba3ab9d5bf4cd1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1 
2024-11-28T07:23:49,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T07:23:49,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for a6b84436e6ee345d2d4f94cd524e48a2 in 558ms, sequenceid=17, compaction requested=false 2024-11-28T07:23:49,292 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-28T07:23:49,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:49,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T07:23:49,305 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:49,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,306 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f483562d43014e91acf13c9432d86738_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778628759/Put/seqid=0 2024-11-28T07:23:49,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742515_1691 (size=12154) 2024-11-28T07:23:49,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,320 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f483562d43014e91acf13c9432d86738_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f483562d43014e91acf13c9432d86738_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:49,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43f482cd42814b26be1ef1f20ea011f7, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:49,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43f482cd42814b26be1ef1f20ea011f7 is 175, key is test_row_0/A:col10/1732778628759/Put/seqid=0 2024-11-28T07:23:49,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742516_1692 (size=30955) 2024-11-28T07:23:49,325 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43f482cd42814b26be1ef1f20ea011f7 2024-11-28T07:23:49,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dd0b4d43f859483aab4235a00a894462 is 50, key is test_row_0/B:col10/1732778628759/Put/seqid=0 2024-11-28T07:23:49,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742517_1693 (size=12001) 2024-11-28T07:23:49,337 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dd0b4d43f859483aab4235a00a894462 2024-11-28T07:23:49,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/6a7cef1d195344bdbd75155b74c82b29 is 50, key is test_row_0/C:col10/1732778628759/Put/seqid=0 2024-11-28T07:23:49,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742518_1694 (size=12001) 2024-11-28T07:23:49,358 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/6a7cef1d195344bdbd75155b74c82b29 2024-11-28T07:23:49,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43f482cd42814b26be1ef1f20ea011f7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7 2024-11-28T07:23:49,366 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7, entries=150, sequenceid=41, filesize=30.2 K 2024-11-28T07:23:49,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dd0b4d43f859483aab4235a00a894462 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462 2024-11-28T07:23:49,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,370 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T07:23:49,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/6a7cef1d195344bdbd75155b74c82b29 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29 2024-11-28T07:23:49,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:23:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:23:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,379 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T07:23:49,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,380 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for a6b84436e6ee345d2d4f94cd524e48a2 in 74ms, sequenceid=41, compaction requested=false 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:49,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-28T07:23:49,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-28T07:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 691 msec 2024-11-28T07:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 695 msec 2024-11-28T07:23:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:49,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:23:49,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:49,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:49,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128bab5612127f04633be7997b345ad9bd7_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is 
test_row_0/A:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:49,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742519_1695 (size=19474) 2024-11-28T07:23:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,479 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778689475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778689475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778689477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778689476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778689478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778689581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778689581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778689581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778689581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778689581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778689784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778689785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778689785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778689785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:49,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778689784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:49,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T07:23:49,795 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-28T07:23:49,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:49,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-28T07:23:49,799 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:49,799 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:49,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:49,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:49,874 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:49,877 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128bab5612127f04633be7997b345ad9bd7_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bab5612127f04633be7997b345ad9bd7_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:49,878 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eacd89be5faf4aa29c2448d07283620c, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:49,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eacd89be5faf4aa29c2448d07283620c is 175, key is test_row_0/A:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:49,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742520_1696 (size=56733) 2024-11-28T07:23:49,887 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eacd89be5faf4aa29c2448d07283620c 2024-11-28T07:23:49,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7fc7d0b05274446e8a73607669e5e88a is 50, key is test_row_0/B:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:49,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:49,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742521_1697 (size=12001) 2024-11-28T07:23:49,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7fc7d0b05274446e8a73607669e5e88a 2024-11-28T07:23:49,908 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/0f920456ccc343f3a3595e7b5c87498c is 50, key is test_row_0/C:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:49,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742522_1698 (size=12001) 2024-11-28T07:23:49,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:49,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-28T07:23:49,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
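The surrounding entries trace a client-requested flush of TestAcidGuarantees: the master logs "Client=jenkins//172.17.0.2 flush TestAcidGuarantees", stores FlushTableProcedure pid=173, fans out a FlushRegionProcedure subprocedure (pid=174), and the region server's MemStoreFlusher meanwhile writes out the A/B/C store files. A minimal sketch of issuing the same kind of flush from a client, assuming a standard HBase 2.x Connection and an hbase-site.xml pointing at the cluster (nothing below is taken from the test code itself):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the target cluster.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the "flush TestAcidGuarantees" request handled by HMaster above:
          // the master stores a FlushTableProcedure and dispatches FlushRegionProcedure subtasks.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }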
2024-11-28T07:23:49,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:49,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:49,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:49,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
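The RegionTooBusyException warnings that dominate this stretch of the log come from HRegion.checkResources rejecting mutations while the region's memstore is above its blocking limit (reported here as 512.0 K); the in-flight flush has to drain the memstore before writes are accepted again. The stock HBase client already retries these internally, but the expected handling pattern is back-off-and-retry, sketched below for a single Put (row, family, qualifier and backoff values are illustrative, chosen only to mirror the test_row_0/A:col10 shape seen in the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionRetry {

      // Retries a single Put with exponential backoff while the region reports it is too busy.
      // Assumes the exception propagates unwrapped; with client retries enabled it may instead
      // arrive wrapped in a retries-exhausted exception.
      static void putWithRetry(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        long backoffMs = 100; // illustrative starting backoff
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put); // same server-side path as RSRpcServices.mutate -> HRegion.put in the traces
            return;
          } catch (RegionTooBusyException e) { // memstore over its blocking limit, as in the WARN entries
            if (attempt >= maxAttempts) {
              throw e;
            }
            Thread.sleep(backoffMs);
            backoffMs = Math.min(backoffMs * 2, 5_000);
          }
        }
      }

      static Put examplePut() {
        // Mirrors the row/family/qualifier shape from the log; the value is made up.
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      }
    }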
2024-11-28T07:23:49,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778690088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778690090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778690091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778690093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778690095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:50,103 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:50,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-28T07:23:50,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:50,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,255 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:50,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-28T07:23:50,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:50,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:50,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/0f920456ccc343f3a3595e7b5c87498c 2024-11-28T07:23:50,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eacd89be5faf4aa29c2448d07283620c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c 2024-11-28T07:23:50,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c, entries=300, sequenceid=53, filesize=55.4 K 2024-11-28T07:23:50,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7fc7d0b05274446e8a73607669e5e88a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a 2024-11-28T07:23:50,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a, entries=150, sequenceid=53, 
filesize=11.7 K 2024-11-28T07:23:50,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/0f920456ccc343f3a3595e7b5c87498c as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c 2024-11-28T07:23:50,343 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T07:23:50,344 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a6b84436e6ee345d2d4f94cd524e48a2 in 900ms, sequenceid=53, compaction requested=true 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:50,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:50,344 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:50,344 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:50,345 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:50,345 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:50,345 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): 
a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:23:50,345 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:23:50,345 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,345 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,345 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=115.9 K 2024-11-28T07:23:50,345 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.2 K 2024-11-28T07:23:50,345 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c] 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a5ae23e52aa54e08b4553fa30d6de102, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778628732 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31e636694cee417690f25a3be92cc005, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778628732 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43f482cd42814b26be1ef1f20ea011f7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778628756 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting dd0b4d43f859483aab4235a00a894462, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778628756 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting eacd89be5faf4aa29c2448d07283620c, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629421 2024-11-28T07:23:50,346 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fc7d0b05274446e8a73607669e5e88a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629441 2024-11-28T07:23:50,351 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:50,352 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#600 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:50,353 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/c063a972e3a74962b751d7d38aea67a8 is 50, key is test_row_0/B:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:50,353 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128fcf60f6575ad45209fc4773b95eec350_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:50,355 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128fcf60f6575ad45209fc4773b95eec350_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:50,356 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128fcf60f6575ad45209fc4773b95eec350_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:50,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742523_1699 (size=12104) 2024-11-28T07:23:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742524_1700 (size=4469) 2024-11-28T07:23:50,362 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#601 average throughput is 2.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:50,363 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/d9742d5bb5cc4b8aab1c86378fe223ed is 175, key is test_row_0/A:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:50,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742525_1701 (size=31058) 2024-11-28T07:23:50,372 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/d9742d5bb5cc4b8aab1c86378fe223ed as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed 2024-11-28T07:23:50,376 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into d9742d5bb5cc4b8aab1c86378fe223ed(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:50,376 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:50,376 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778630344; duration=0sec 2024-11-28T07:23:50,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:50,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:23:50,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:50,377 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:50,378 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:23:50,378 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
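The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from the store-file selection step: a window of files is only compacted together if no single file dwarfs the rest of the window. Below is a minimal Java sketch of that kind of ratio test, using the file sizes reported in this log and assuming the default compaction ratio of 1.2; the class and method here are illustrative and not HBase's actual implementation.

```java
import java.util.List;

// Simplified illustration of the "in ratio" test behind the
// "Exploring compaction algorithm has selected 3 files ... with 1 in ratio"
// messages in this log. HBase's ExploringCompactionPolicy scans contiguous
// windows of store files and keeps the best window whose files pass a check of
// this shape; 1.2 stands in for the default hbase.hstore.compaction.ratio.
public class CompactionRatioSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // No single file may be larger than ratio times the rest of the window.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Store A from the log: 30.2 K + 30.2 K + 55.4 K (118643 bytes total) -> accepted.
    System.out.println(filesInRatio(List.of(30958L, 30958L, 56727L), 1.2));
    // Store B from the log: three 11.7 K files (36003 bytes total) -> accepted.
    System.out.println(filesInRatio(List.of(12001L, 12001L, 12001L), 1.2));
  }
}
```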
2024-11-28T07:23:50,378 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.2 K 2024-11-28T07:23:50,378 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3330bcc900049f682ba3ab9d5bf4cd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732778628732 2024-11-28T07:23:50,378 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a7cef1d195344bdbd75155b74c82b29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732778628756 2024-11-28T07:23:50,379 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f920456ccc343f3a3595e7b5c87498c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629441 2024-11-28T07:23:50,385 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T07:23:50,386 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#602 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:50,387 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/6fed1a396a3548d2be7988de0a1c3c66 is 50, key is test_row_0/C:col10/1732778629444/Put/seqid=0 2024-11-28T07:23:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:50,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742526_1702 (size=12104) 2024-11-28T07:23:50,408 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:50,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
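The repeated RegionTooBusyException entries in this section ("Over memstore limit=512.0 K") are thrown by HRegion.checkResources once a region's memstore grows past its blocking limit, which is the per-region flush size multiplied by the block multiplier. A minimal sketch of how a 512 K limit like this one could arise follows; the 128 K flush size and multiplier of 4 are assumptions chosen only to reproduce the figure seen in this log, not values taken from the test itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of where a blocking limit like "Over memstore limit=512.0 K" comes from.
// The blocking threshold is flush size * block multiplier; the concrete values
// below (128 K * 4 = 512 K) are assumptions used only for illustration.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed)
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);    // block writes at 4x flush size

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit = " + (flushSize * multiplier) / 1024 + " K");
  }
}
```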
2024-11-28T07:23:50,409 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:50,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:50,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112813049d711da24cb59433fcda831e0178_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778629477/Put/seqid=0 2024-11-28T07:23:50,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742527_1703 (size=12154) 2024-11-28T07:23:50,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:50,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:50,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778690603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778690604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778690605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778690605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778690605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778690708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778690708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778690709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778690709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778690709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,763 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/c063a972e3a74962b751d7d38aea67a8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/c063a972e3a74962b751d7d38aea67a8 2024-11-28T07:23:50,767 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into c063a972e3a74962b751d7d38aea67a8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
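From the client's side, each rejected Mutate call logged by ipc.CallRunner in this section surfaces through the normal HBase client retry machinery rather than as an immediate failure. Below is a minimal sketch of a writer issuing the same kind of put against TestAcidGuarantees; the class name and the specific retry settings are assumptions, shown only to illustrate how a caller bounds the retries that these RegionTooBusyException rejections trigger.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer like the ones producing the Mutate calls in this log.
// The table, family, and row names match the test output; the retry settings
// are assumptions chosen for illustration.
public class BusyRegionWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 10); // how many attempts before giving up
    conf.setLong("hbase.client.pause", 100);        // base backoff between attempts, in ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Each server-side RegionTooBusyException becomes one retry attempt here;
      // once the configured retries are exhausted, put() throws to the caller.
      table.put(put);
    }
  }
}
```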
2024-11-28T07:23:50,767 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:50,767 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778630344; duration=0sec 2024-11-28T07:23:50,767 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:50,767 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:23:50,809 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/6fed1a396a3548d2be7988de0a1c3c66 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6fed1a396a3548d2be7988de0a1c3c66 2024-11-28T07:23:50,812 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 6fed1a396a3548d2be7988de0a1c3c66(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:50,812 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:50,812 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778630344; duration=0sec 2024-11-28T07:23:50,813 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:50,813 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:23:50,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:50,830 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112813049d711da24cb59433fcda831e0178_a6b84436e6ee345d2d4f94cd524e48a2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112813049d711da24cb59433fcda831e0178_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:50,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/64ad0040541747369e4b3134b1f2f0a5, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:50,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/64ad0040541747369e4b3134b1f2f0a5 is 175, key is test_row_0/A:col10/1732778629477/Put/seqid=0 2024-11-28T07:23:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742528_1704 (size=30955) 2024-11-28T07:23:50,835 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/64ad0040541747369e4b3134b1f2f0a5 2024-11-28T07:23:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a07ad05677146819580e3d4c8f10478 is 50, key is test_row_0/B:col10/1732778629477/Put/seqid=0 2024-11-28T07:23:50,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742529_1705 (size=12001) 2024-11-28T07:23:50,844 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a07ad05677146819580e3d4c8f10478 2024-11-28T07:23:50,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/de88311ff07c4380b86ad751f872cd45 is 50, key is test_row_0/C:col10/1732778629477/Put/seqid=0 2024-11-28T07:23:50,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742530_1706 (size=12001) 2024-11-28T07:23:50,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:50,912 
WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778690911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778690911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778690911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778690911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:50,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:50,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778690913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778691214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778691214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778691214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778691215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778691216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,287 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/de88311ff07c4380b86ad751f872cd45 2024-11-28T07:23:51,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/64ad0040541747369e4b3134b1f2f0a5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5 2024-11-28T07:23:51,295 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5, entries=150, sequenceid=78, filesize=30.2 K 2024-11-28T07:23:51,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a07ad05677146819580e3d4c8f10478 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478 2024-11-28T07:23:51,299 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:23:51,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/de88311ff07c4380b86ad751f872cd45 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45 2024-11-28T07:23:51,303 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T07:23:51,303 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for a6b84436e6ee345d2d4f94cd524e48a2 in 894ms, sequenceid=78, compaction requested=false 2024-11-28T07:23:51,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:51,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:51,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-28T07:23:51,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-28T07:23:51,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-28T07:23:51,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5050 sec 2024-11-28T07:23:51,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.5090 sec 2024-11-28T07:23:51,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T07:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:51,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:51,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287cc49a50ceef489fa005948fb8a9108b_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:51,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742531_1707 (size=12154) 2024-11-28T07:23:51,736 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:51,739 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287cc49a50ceef489fa005948fb8a9108b_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287cc49a50ceef489fa005948fb8a9108b_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:51,743 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4f2c1374cd62406b8f00928490e11b01, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:51,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4f2c1374cd62406b8f00928490e11b01 is 175, key is test_row_0/A:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778691746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778691746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778691754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778691757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778691761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742532_1708 (size=30955) 2024-11-28T07:23:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778691855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778691855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778691860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778691860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:51,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778691863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:51,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T07:23:51,903 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-28T07:23:51,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-28T07:23:51,908 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:51,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T07:23:51,908 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:51,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:52,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=175 2024-11-28T07:23:52,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:52,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778692059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778692060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778692064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778692064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778692067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,178 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4f2c1374cd62406b8f00928490e11b01 2024-11-28T07:23:52,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/839b524f0c7b4165a92ea4abe67915ec is 50, key is test_row_0/B:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:52,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742533_1709 (size=12001) 2024-11-28T07:23:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T07:23:52,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:52,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:52,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:52,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778692363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778692368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778692368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778692368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778692371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T07:23:52,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:52,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/839b524f0c7b4165a92ea4abe67915ec 2024-11-28T07:23:52,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5ef66c56ef234f2b9bcfbe57d228f1ab is 50, key is test_row_0/C:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:52,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:52,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:52,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742534_1710 (size=12001) 2024-11-28T07:23:52,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5ef66c56ef234f2b9bcfbe57d228f1ab 2024-11-28T07:23:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4f2c1374cd62406b8f00928490e11b01 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01 2024-11-28T07:23:52,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01, entries=150, sequenceid=96, filesize=30.2 K 2024-11-28T07:23:52,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/839b524f0c7b4165a92ea4abe67915ec as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec 2024-11-28T07:23:52,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec, entries=150, sequenceid=96, filesize=11.7 K 2024-11-28T07:23:52,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5ef66c56ef234f2b9bcfbe57d228f1ab as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab 2024-11-28T07:23:52,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab, entries=150, sequenceid=96, filesize=11.7 K 2024-11-28T07:23:52,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for a6b84436e6ee345d2d4f94cd524e48a2 in 996ms, sequenceid=96, compaction requested=true 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:52,716 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:52,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:52,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:52,717 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:23:52,718 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:52,718 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,718 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/c063a972e3a74962b751d7d38aea67a8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.3 K 2024-11-28T07:23:52,718 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=90.8 K 2024-11-28T07:23:52,718 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01] 2024-11-28T07:23:52,718 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c063a972e3a74962b751d7d38aea67a8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629441 2024-11-28T07:23:52,719 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9742d5bb5cc4b8aab1c86378fe223ed, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629441 2024-11-28T07:23:52,719 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a07ad05677146819580e3d4c8f10478, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778629456 2024-11-28T07:23:52,720 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64ad0040541747369e4b3134b1f2f0a5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778629456 2024-11-28T07:23:52,720 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 839b524f0c7b4165a92ea4abe67915ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:52,720 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f2c1374cd62406b8f00928490e11b01, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:52,737 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:52,749 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#610 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:52,750 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/344e11f3a4ca41af859cf5795cbb4e57 is 50, key is test_row_0/B:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:52,756 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112897272b5f9acf493fb1fe72f3fb11abf0_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:52,758 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112897272b5f9acf493fb1fe72f3fb11abf0_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:52,758 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112897272b5f9acf493fb1fe72f3fb11abf0_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:52,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742535_1711 (size=12207) 2024-11-28T07:23:52,810 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/344e11f3a4ca41af859cf5795cbb4e57 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/344e11f3a4ca41af859cf5795cbb4e57 2024-11-28T07:23:52,815 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 344e11f3a4ca41af859cf5795cbb4e57(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:52,815 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:52,815 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778632716; duration=0sec 2024-11-28T07:23:52,815 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:52,815 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:23:52,815 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:52,817 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:52,817 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:23:52,817 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,817 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6fed1a396a3548d2be7988de0a1c3c66, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.3 K 2024-11-28T07:23:52,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fed1a396a3548d2be7988de0a1c3c66, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732778629441 2024-11-28T07:23:52,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting de88311ff07c4380b86ad751f872cd45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732778629456 2024-11-28T07:23:52,818 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ef66c56ef234f2b9bcfbe57d228f1ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:52,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
592d8b721726,33143,1732778474488 2024-11-28T07:23:52,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T07:23:52,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:52,825 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:52,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:52,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742536_1712 (size=4469) 2024-11-28T07:23:52,826 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#609 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:52,827 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/6006a79cc06b443ab11c5632b01d2db6 is 175, key is test_row_0/A:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:52,834 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#611 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:52,835 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/94a8b8f9c71e4e7d901c9c8139001da5 is 50, key is test_row_0/C:col10/1732778630604/Put/seqid=0 2024-11-28T07:23:52,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:52,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:52,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742537_1713 (size=31161) 2024-11-28T07:23:52,884 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/6006a79cc06b443ab11c5632b01d2db6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6 2024-11-28T07:23:52,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742538_1714 (size=12207) 2024-11-28T07:23:52,893 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into 6006a79cc06b443ab11c5632b01d2db6(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:52,893 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:52,893 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778632716; duration=0sec 2024-11-28T07:23:52,893 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:52,893 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:23:52,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778692888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778692891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112865ee6723288b45e39e955ff2e613325c_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778631750/Put/seqid=0 2024-11-28T07:23:52,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778692894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778692895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778692895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:52,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742539_1715 (size=12154) 2024-11-28T07:23:52,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:52,944 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112865ee6723288b45e39e955ff2e613325c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112865ee6723288b45e39e955ff2e613325c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:52,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/bd1a53a7906349f6b47c6684e37b9bd1, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:52,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/bd1a53a7906349f6b47c6684e37b9bd1 is 175, key is test_row_0/A:col10/1732778631750/Put/seqid=0 2024-11-28T07:23:52,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:52,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778692996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778692997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742540_1716 (size=30955) 2024-11-28T07:23:53,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778693001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778693002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778693003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T07:23:53,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778693201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778693202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778693206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778693207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778693207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,296 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/94a8b8f9c71e4e7d901c9c8139001da5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/94a8b8f9c71e4e7d901c9c8139001da5 2024-11-28T07:23:53,300 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 94a8b8f9c71e4e7d901c9c8139001da5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:53,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:53,300 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778632716; duration=0sec 2024-11-28T07:23:53,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:53,300 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:23:53,405 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/bd1a53a7906349f6b47c6684e37b9bd1 2024-11-28T07:23:53,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7ca2426ba1a54cc49411de53e415d4f9 is 50, key is test_row_0/B:col10/1732778631750/Put/seqid=0 2024-11-28T07:23:53,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742541_1717 (size=12001) 2024-11-28T07:23:53,466 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7ca2426ba1a54cc49411de53e415d4f9 2024-11-28T07:23:53,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/39044e5f22e344ce9034b38112b68b25 is 50, key is test_row_0/C:col10/1732778631750/Put/seqid=0 2024-11-28T07:23:53,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778693506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778693513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778693513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778693513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778693524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:53,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742542_1718 (size=12001) 2024-11-28T07:23:53,539 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/39044e5f22e344ce9034b38112b68b25 2024-11-28T07:23:53,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/bd1a53a7906349f6b47c6684e37b9bd1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1 2024-11-28T07:23:53,552 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1, entries=150, sequenceid=118, filesize=30.2 K 2024-11-28T07:23:53,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7ca2426ba1a54cc49411de53e415d4f9 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9 2024-11-28T07:23:53,561 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9, entries=150, sequenceid=118, filesize=11.7 K 2024-11-28T07:23:53,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/39044e5f22e344ce9034b38112b68b25 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25 2024-11-28T07:23:53,565 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25, entries=150, sequenceid=118, filesize=11.7 K 2024-11-28T07:23:53,567 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for a6b84436e6ee345d2d4f94cd524e48a2 in 742ms, sequenceid=118, compaction requested=false 2024-11-28T07:23:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:53,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-28T07:23:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-28T07:23:53,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-28T07:23:53,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6600 sec 2024-11-28T07:23:53,571 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.6640 sec 2024-11-28T07:23:54,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:54,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:54,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T07:23:54,017 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-28T07:23:54,019 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:54,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-28T07:23:54,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T07:23:54,021 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:54,021 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:54,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:54,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f5fd9a43cbcb4557b626c3c0a8eb92ad_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742543_1719 (size=14744) 2024-11-28T07:23:54,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778694088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778694086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778694088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778694093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778694093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T07:23:54,174 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:54,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
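The repeated RegionTooBusyException entries above are the region server refusing new writes because the region's memstore has grown past its blocking limit (reported as 512.0 K) while a flush is still running; writers are expected to back off and retry. A minimal client-side sketch of that pattern, assuming the standard Table/Put API; the retry count, the backoff, and the assumption that the exception reaches the caller directly (rather than wrapped by the client's own retry machinery) are illustrative, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Retry with a simple linear backoff while the region reports it is too busy
          // (memstore above its blocking limit until the flush catches up).
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException busy) {
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }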
2024-11-28T07:23:54,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778694195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778694195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778694195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778694199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778694200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T07:23:54,328 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:54,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
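The 512.0 K figure in these "Over memstore limit" entries is the region's blocking memstore size, which is normally the per-region flush threshold multiplied by the blocking multiplier; this test evidently runs with a much smaller flush size than the stock 128 MB default so that writers hit the limit quickly. A hedged sketch of the two configuration keys involved; the concrete values are illustrative (chosen so the product works out to 512 KB), not read from the test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (stock default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore
        // exceeds flush.size * this multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit =
            conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit in bytes: " + blockingLimit); // 524288 = 512 K
      }
    }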
2024-11-28T07:23:54,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778694399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778694401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778694401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778694404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778694417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,454 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:54,458 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f5fd9a43cbcb4557b626c3c0a8eb92ad_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f5fd9a43cbcb4557b626c3c0a8eb92ad_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:54,459 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/34b75f548b25474abdf8ade228a72e7f, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:54,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/34b75f548b25474abdf8ade228a72e7f is 175, key is test_row_0/A:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:23:54,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742544_1720 (size=39699) 2024-11-28T07:23:54,504 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/34b75f548b25474abdf8ade228a72e7f 2024-11-28T07:23:54,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b34ea5f6e6d24a67a66c0605ada397bf is 50, key is test_row_0/B:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742545_1721 (size=12151) 2024-11-28T07:23:54,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b34ea5f6e6d24a67a66c0605ada397bf 2024-11-28T07:23:54,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b0a1fec7a6c74c7caa1c8f84ff87d423 is 50, key is test_row_0/C:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,619 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742546_1722 (size=12151) 2024-11-28T07:23:54,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b0a1fec7a6c74c7caa1c8f84ff87d423 2024-11-28T07:23:54,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T07:23:54,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/34b75f548b25474abdf8ade228a72e7f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f 2024-11-28T07:23:54,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f, entries=200, sequenceid=137, filesize=38.8 K 2024-11-28T07:23:54,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b34ea5f6e6d24a67a66c0605ada397bf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf 2024-11-28T07:23:54,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf, entries=150, sequenceid=137, filesize=11.9 K 2024-11-28T07:23:54,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/b0a1fec7a6c74c7caa1c8f84ff87d423 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423 2024-11-28T07:23:54,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:54,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:54,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:54,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423, entries=150, sequenceid=137, filesize=11.9 K 2024-11-28T07:23:54,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for a6b84436e6ee345d2d4f94cd524e48a2 in 646ms, sequenceid=137, compaction requested=true 2024-11-28T07:23:54,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:54,657 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:54,658 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:54,658 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:23:54,658 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,658 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=99.4 K 2024-11-28T07:23:54,658 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,658 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f] 2024-11-28T07:23:54,659 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6006a79cc06b443ab11c5632b01d2db6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:54,660 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd1a53a7906349f6b47c6684e37b9bd1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732778631741 2024-11-28T07:23:54,660 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34b75f548b25474abdf8ade228a72e7f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632877 2024-11-28T07:23:54,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:54,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:54,661 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:54,662 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:54,662 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:23:54,662 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:54,662 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/344e11f3a4ca41af859cf5795cbb4e57, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.5 K 2024-11-28T07:23:54,662 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 344e11f3a4ca41af859cf5795cbb4e57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:54,663 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ca2426ba1a54cc49411de53e415d4f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732778631741 2024-11-28T07:23:54,663 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b34ea5f6e6d24a67a66c0605ada397bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632893 2024-11-28T07:23:54,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:54,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:54,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:54,668 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:54,669 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:54,685 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411289dddd18584544a529049222d90b0d941_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:54,687 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411289dddd18584544a529049222d90b0d941_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:54,688 DEBUG 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289dddd18584544a529049222d90b0d941_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:54,702 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#619 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:54,702 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/12a90c69c5614496992d29380fa9a18b is 50, key is test_row_0/B:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:54,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:54,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:54,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778694721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778694723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778694724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,733 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778694726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778694727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742547_1723 (size=4469) 2024-11-28T07:23:54,746 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#618 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:54,747 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19 is 175, key is test_row_0/A:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742548_1724 (size=12459) 2024-11-28T07:23:54,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d617d33d1e404ce4bc9ccba5e799e7a5_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778634083/Put/seqid=0 2024-11-28T07:23:54,763 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/12a90c69c5614496992d29380fa9a18b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/12a90c69c5614496992d29380fa9a18b 2024-11-28T07:23:54,771 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 12a90c69c5614496992d29380fa9a18b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:54,771 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:54,771 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778634661; duration=0sec 2024-11-28T07:23:54,772 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:54,772 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:23:54,772 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:54,774 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:54,774 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:23:54,775 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,775 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/94a8b8f9c71e4e7d901c9c8139001da5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=35.5 K 2024-11-28T07:23:54,775 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 94a8b8f9c71e4e7d901c9c8139001da5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1732778630604 2024-11-28T07:23:54,775 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 39044e5f22e344ce9034b38112b68b25, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732778631741 2024-11-28T07:23:54,776 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b0a1fec7a6c74c7caa1c8f84ff87d423, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632893 2024-11-28T07:23:54,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is 
added to blk_1073742549_1725 (size=31413) 2024-11-28T07:23:54,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:54,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742550_1726 (size=17284) 2024-11-28T07:23:54,816 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#621 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:54,816 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/15a98f745ba04baabe9dcc02bb8616ba is 50, key is test_row_0/C:col10/1732778632893/Put/seqid=0 2024-11-28T07:23:54,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778694830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778694834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778694835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778694836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:54,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742551_1727 (size=12459) 2024-11-28T07:23:54,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:54,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:54,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:54,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:54,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:54,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778695035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778695039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778695039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778695040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:55,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:55,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:55,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:55,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:55,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T07:23:55,201 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19 2024-11-28T07:23:55,207 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,208 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into c7dd02a4ec4a4948aeaa2b7e62fbdd19(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:55,208 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:55,208 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778634657; duration=0sec 2024-11-28T07:23:55,209 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:55,209 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:23:55,212 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d617d33d1e404ce4bc9ccba5e799e7a5_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d617d33d1e404ce4bc9ccba5e799e7a5_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:55,213 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/057490e761ba49f09419748448d36857, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:55,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/057490e761ba49f09419748448d36857 is 175, key is test_row_0/A:col10/1732778634083/Put/seqid=0 2024-11-28T07:23:55,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778695228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742552_1728 (size=48389) 2024-11-28T07:23:55,252 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/057490e761ba49f09419748448d36857 2024-11-28T07:23:55,257 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/15a98f745ba04baabe9dcc02bb8616ba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/15a98f745ba04baabe9dcc02bb8616ba 2024-11-28T07:23:55,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:55,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:55,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:55,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:55,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:55,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:55,265 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 15a98f745ba04baabe9dcc02bb8616ba(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:55,265 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:55,265 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778634667; duration=0sec 2024-11-28T07:23:55,265 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:55,265 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:23:55,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:55,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/11370cb7d260403b9d06b7763bd96d31 is 50, key is test_row_0/B:col10/1732778634083/Put/seqid=0 2024-11-28T07:23:55,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742553_1729 (size=12151) 2024-11-28T07:23:55,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/11370cb7d260403b9d06b7763bd96d31 2024-11-28T07:23:55,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/8e1baca52ed2401e8a07fe6c2d325f3b is 50, key is test_row_0/C:col10/1732778634083/Put/seqid=0 2024-11-28T07:23:55,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778695342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778695349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778695349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:55,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778695349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:55,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742554_1730 (size=12151) 2024-11-28T07:23:55,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/8e1baca52ed2401e8a07fe6c2d325f3b 2024-11-28T07:23:55,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/057490e761ba49f09419748448d36857 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857 2024-11-28T07:23:55,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857, entries=250, sequenceid=159, filesize=47.3 K 2024-11-28T07:23:55,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/11370cb7d260403b9d06b7763bd96d31 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31 2024-11-28T07:23:55,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31, entries=150, sequenceid=159, filesize=11.9 K 2024-11-28T07:23:55,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/8e1baca52ed2401e8a07fe6c2d325f3b as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b 2024-11-28T07:23:55,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b, entries=150, sequenceid=159, filesize=11.9 K 2024-11-28T07:23:55,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a6b84436e6ee345d2d4f94cd524e48a2 in 705ms, sequenceid=159, compaction requested=false 2024-11-28T07:23:55,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:55,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:55,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T07:23:55,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:55,420 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T07:23:55,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:55,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a3c5d22b7dea416494142e76e9c2dd48_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778634719/Put/seqid=0 
2024-11-28T07:23:55,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742555_1731 (size=12304) 2024-11-28T07:23:55,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,499 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a3c5d22b7dea416494142e76e9c2dd48_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a3c5d22b7dea416494142e76e9c2dd48_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:55,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/44c08b8c95924426a84f38be67de129e, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:55,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/44c08b8c95924426a84f38be67de129e is 175, key is test_row_0/A:col10/1732778634719/Put/seqid=0 2024-11-28T07:23:55,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742556_1732 (size=31105) 2024-11-28T07:23:55,543 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/44c08b8c95924426a84f38be67de129e 2024-11-28T07:23:55,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/9753c1f4a5554a6da28957c3fbe4e7bd is 50, key is test_row_0/B:col10/1732778634719/Put/seqid=0 2024-11-28T07:23:55,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742557_1733 (size=12151) 2024-11-28T07:23:55,622 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/9753c1f4a5554a6da28957c3fbe4e7bd 2024-11-28T07:23:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5dce0991e33542149d0a806b745cfe0f is 50, key is test_row_0/C:col10/1732778634719/Put/seqid=0 2024-11-28T07:23:55,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742558_1734 (size=12151) 2024-11-28T07:23:55,687 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5dce0991e33542149d0a806b745cfe0f 2024-11-28T07:23:55,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/44c08b8c95924426a84f38be67de129e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e 2024-11-28T07:23:55,694 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e, entries=150, sequenceid=176, filesize=30.4 K 2024-11-28T07:23:55,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/9753c1f4a5554a6da28957c3fbe4e7bd as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd 2024-11-28T07:23:55,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,699 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 
{event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd, entries=150, sequenceid=176, filesize=11.9 K 2024-11-28T07:23:55,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/5dce0991e33542149d0a806b745cfe0f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f 2024-11-28T07:23:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,704 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f, entries=150, sequenceid=176, filesize=11.9 K 2024-11-28T07:23:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,705 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for a6b84436e6ee345d2d4f94cd524e48a2 in 285ms, sequenceid=176, compaction requested=true 2024-11-28T07:23:55,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:55,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:55,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-28T07:23:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-28T07:23:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-28T07:23:55,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:23:55,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6850 sec 2024-11-28T07:23:55,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.6890 sec 2024-11-28T07:23:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeats continuously from 2024-11-28T07:23:55,711 through 2024-11-28T07:23:55,812, interleaved across RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=33143); the duplicated entries are condensed here ...]
2024-11-28T07:23:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C
2024-11-28T07:23:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T07:23:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2
2024-11-28T07:23:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:55,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:56,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285ae447df78884a708142bd4f931b93be_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778635971/Put/seqid=0
2024-11-28T07:23:56,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742559_1735 (size=19774)
2024-11-28T07:23:56,023 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:23:56,028 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285ae447df78884a708142bd4f931b93be_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ae447df78884a708142bd4f931b93be_a6b84436e6ee345d2d4f94cd524e48a2
2024-11-28T07:23:56,029 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/90ec350c15bf498aa504bb352a5c4b19, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2]
2024-11-28T07:23:56,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/90ec350c15bf498aa504bb352a5c4b19 is 175, key is test_row_0/A:col10/1732778635971/Put/seqid=0
2024-11-28T07:23:56,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742560_1736 (size=57033)
2024-11-28T07:23:56,061 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=187, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/90ec350c15bf498aa504bb352a5c4b19
2024-11-28T07:23:56,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/3ce5364970e94a0f91684514cc4a6cc1 is 50, key is test_row_0/B:col10/1732778635971/Put/seqid=0
2024-11-28T07:23:56,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742561_1737 (size=12151)
2024-11-28T07:23:56,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/3ce5364970e94a0f91684514cc4a6cc1
2024-11-28T07:23:56,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/dbecdeb4300a46a2973c44a0a2c2b0ba is 50, key is test_row_0/C:col10/1732778635971/Put/seqid=0
2024-11-28T07:23:56,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742562_1738 (size=12151)
2024-11-28T07:23:56,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:56,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778696102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:56,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778696109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:56,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778696110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:56,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:23:56,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778696111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:23:56,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-11-28T07:23:56,125 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed
2024-11-28T07:23:56,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T07:23:56,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees
2024-11-28T07:23:56,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-11-28T07:23:56,130 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T07:23:56,131 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T07:23:56,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-28T07:23:56,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778696212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778696224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778696224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778696225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T07:23:56,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778696241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:56,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-28T07:23:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:56,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
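The WARN/DEBUG pairs above show HRegion.checkResources() rejecting client Mutate calls with RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K in this run), while the FlushRegionCallable for pid=180 fails with "Unable to complete flush" because the region reports that a flush is already in progress; callers are expected to back off and retry until the in-flight flush drains the memstore. The HBase client already retries RegionTooBusyException internally, so the following is only an illustrative application-level sketch: the row, family and qualifier are taken from this log (test_row_0, A:col10), while the value, retry count and sleep times are invented.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the test data in this log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);   // server-side checkResources() may reject this write
          return;           // write accepted
        } catch (RegionTooBusyException e) {
          // The busy signal may also arrive wrapped in a retries-exhausted exception,
          // depending on client retry settings; this branch only handles the direct form.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2_000);
        }
      }
      throw new IOException("region stayed too busy after 10 attempts");
    }
  }
}

In practice the retry budget is normally left to hbase.client.retries.number and the client's pause settings rather than a hand-rolled loop like this.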
2024-11-28T07:23:56,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:56,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:56,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778696417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778696429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778696429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T07:23:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778696430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:56,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-28T07:23:56,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:56,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:56,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:56,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:56,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/dbecdeb4300a46a2973c44a0a2c2b0ba 2024-11-28T07:23:56,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/90ec350c15bf498aa504bb352a5c4b19 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19 2024-11-28T07:23:56,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19, entries=300, sequenceid=187, filesize=55.7 K 2024-11-28T07:23:56,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/3ce5364970e94a0f91684514cc4a6cc1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1 2024-11-28T07:23:56,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1, entries=150, sequenceid=187, filesize=11.9 K 2024-11-28T07:23:56,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/dbecdeb4300a46a2973c44a0a2c2b0ba as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba 2024-11-28T07:23:56,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba, entries=150, sequenceid=187, filesize=11.9 K 2024-11-28T07:23:56,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a6b84436e6ee345d2d4f94cd524e48a2 in 535ms, sequenceid=187, compaction requested=true 2024-11-28T07:23:56,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:56,509 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:56,509 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:56,509 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:56,510 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:56,510 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:23:56,510 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
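The sequence above, Mutate calls rejected at "Over memstore limit=512.0 K", a ~53.67 KB flush completing at sequenceid=187, and compaction immediately requested for stores A, B and C, is governed by a small set of region-server sizing properties. The 512 K blocking limit is presumably this test's deliberately small flush size multiplied by the memstore block multiplier rather than a production value. A sketch of the relevant properties set programmatically, with values shown only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionSizingSketch {
  // Returns a configuration holding the knobs behind the behaviour seen in this log.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (default 128 MB; this test clearly uses far less).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException at flush.size * this multiplier (default 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // A store becomes eligible for minor compaction once it has this many files (default 3).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Updates are blocked once a store accumulates this many files (the "16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}

Shrinking the flush size, as this test apparently does, is what makes the region flush and compact after only a few kilobytes of writes.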
2024-11-28T07:23:56,510 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/12a90c69c5614496992d29380fa9a18b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=47.8 K 2024-11-28T07:23:56,510 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 167940 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:56,510 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:23:56,510 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,511 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=164.0 K 2024-11-28T07:23:56,511 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,511 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19] 2024-11-28T07:23:56,511 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 12a90c69c5614496992d29380fa9a18b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632893 2024-11-28T07:23:56,511 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7dd02a4ec4a4948aeaa2b7e62fbdd19, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632893 2024-11-28T07:23:56,512 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 11370cb7d260403b9d06b7763bd96d31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732778634083 2024-11-28T07:23:56,512 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 057490e761ba49f09419748448d36857, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732778634083 2024-11-28T07:23:56,512 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44c08b8c95924426a84f38be67de129e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732778634719 2024-11-28T07:23:56,512 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 9753c1f4a5554a6da28957c3fbe4e7bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732778634719 2024-11-28T07:23:56,513 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ce5364970e94a0f91684514cc4a6cc1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635924 2024-11-28T07:23:56,513 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90ec350c15bf498aa504bb352a5c4b19, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635923 2024-11-28T07:23:56,526 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#630 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:56,526 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/09886e2624344ab696e907935b518b0d is 50, key is test_row_0/B:col10/1732778635971/Put/seqid=0 2024-11-28T07:23:56,532 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:56,541 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112812ef5a66480d42d391d0438eb2985a8c_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:56,543 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112812ef5a66480d42d391d0438eb2985a8c_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:56,543 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112812ef5a66480d42d391d0438eb2985a8c_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:56,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742563_1739 (size=12595) 2024-11-28T07:23:56,573 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/09886e2624344ab696e907935b518b0d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/09886e2624344ab696e907935b518b0d 2024-11-28T07:23:56,578 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 09886e2624344ab696e907935b518b0d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
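The DefaultMobStoreCompactor and MOB-writer entries above (and the HMobStore flush rename further down) indicate that column family A is MOB-enabled for this table, so every flush and compaction also checks whether oversized cells should be written to separate MOB files; here none qualify, so the MOB writer is aborted. A sketch of how such a family can be declared through the public descriptor API, where the table name and family come from this log and the 100-byte threshold is purely illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family "A" matches the MOB store in this log; cells above the threshold go to MOB files.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // illustrative threshold in bytes
          .build());
      admin.createTable(table.build());
    }
  }
}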
2024-11-28T07:23:56,578 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:56,578 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=12, startTime=1732778636509; duration=0sec 2024-11-28T07:23:56,578 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:56,578 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:23:56,578 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:23:56,580 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:23:56,580 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:23:56,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742564_1740 (size=4469) 2024-11-28T07:23:56,580 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:56,580 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/15a98f745ba04baabe9dcc02bb8616ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=47.8 K 2024-11-28T07:23:56,580 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 15a98f745ba04baabe9dcc02bb8616ba, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1732778632893 2024-11-28T07:23:56,581 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#631 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:56,581 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/db6104afa82b4e6e83c2315eeddeaf4a is 175, key is test_row_0/A:col10/1732778635971/Put/seqid=0 2024-11-28T07:23:56,582 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e1baca52ed2401e8a07fe6c2d325f3b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732778634083 2024-11-28T07:23:56,582 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dce0991e33542149d0a806b745cfe0f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732778634719 2024-11-28T07:23:56,583 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting dbecdeb4300a46a2973c44a0a2c2b0ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635924 2024-11-28T07:23:56,589 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:56,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:56,590 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:56,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:56,606 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#632 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:56,607 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/3e37c3fe462f4da48cdb1653b12dad8b is 50, key is test_row_0/C:col10/1732778635971/Put/seqid=0 2024-11-28T07:23:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742565_1741 (size=31549) 2024-11-28T07:23:56,655 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/db6104afa82b4e6e83c2315eeddeaf4a as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a 2024-11-28T07:23:56,659 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into db6104afa82b4e6e83c2315eeddeaf4a(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:56,659 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:56,659 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=12, startTime=1732778636509; duration=0sec 2024-11-28T07:23:56,659 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:56,659 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:23:56,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280322b66556d944e5a0fae661ea3917fa_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778636109/Put/seqid=0 2024-11-28T07:23:56,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742566_1742 (size=12595) 2024-11-28T07:23:56,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/3e37c3fe462f4da48cdb1653b12dad8b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/3e37c3fe462f4da48cdb1653b12dad8b 2024-11-28T07:23:56,707 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 3e37c3fe462f4da48cdb1653b12dad8b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
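The FlushTableProcedure polled repeatedly as pid=179, and the FlushRegionCallable dispatched to the region server as pid=180, are the master's side of a client-requested table flush (earlier in this log the jenkins client issues "flush TestAcidGuarantees"). A minimal sketch of requesting the same operation through the public Admin API, assuming only the default client configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master tracks the request
      // as a flush procedure like pid=179 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}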
2024-11-28T07:23:56,707 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:56,707 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=12, startTime=1732778636509; duration=0sec 2024-11-28T07:23:56,707 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:56,707 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:23:56,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742567_1743 (size=12304) 2024-11-28T07:23:56,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:56,729 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280322b66556d944e5a0fae661ea3917fa_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280322b66556d944e5a0fae661ea3917fa_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:56,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:56,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:23:56,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/3c2235a8f8444bd09aa526d1facdba98, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:56,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/3c2235a8f8444bd09aa526d1facdba98 is 175, key is test_row_0/A:col10/1732778636109/Put/seqid=0 2024-11-28T07:23:56,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T07:23:56,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778696745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778696745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778696747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778696746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742568_1744 (size=31105) 2024-11-28T07:23:56,778 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/3c2235a8f8444bd09aa526d1facdba98 2024-11-28T07:23:56,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b84744d575e34e50b6d97977a62ad53b is 50, key is test_row_0/B:col10/1732778636109/Put/seqid=0 2024-11-28T07:23:56,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742569_1745 (size=12151) 2024-11-28T07:23:56,853 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b84744d575e34e50b6d97977a62ad53b 2024-11-28T07:23:56,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778696850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778696852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778696852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:56,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778696852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:56,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fae893d3228c47f39cb83f596f619269 is 50, key is test_row_0/C:col10/1732778636109/Put/seqid=0 2024-11-28T07:23:56,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742570_1746 (size=12151) 2024-11-28T07:23:57,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778697056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778697056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778697056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778697059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T07:23:57,324 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fae893d3228c47f39cb83f596f619269 2024-11-28T07:23:57,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/3c2235a8f8444bd09aa526d1facdba98 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98 2024-11-28T07:23:57,333 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98, entries=150, sequenceid=213, filesize=30.4 K 2024-11-28T07:23:57,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b84744d575e34e50b6d97977a62ad53b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b 2024-11-28T07:23:57,337 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-28T07:23:57,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fae893d3228c47f39cb83f596f619269 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269 2024-11-28T07:23:57,342 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269, entries=150, sequenceid=213, filesize=11.9 K 2024-11-28T07:23:57,343 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a6b84436e6ee345d2d4f94cd524e48a2 in 753ms, sequenceid=213, compaction requested=false 2024-11-28T07:23:57,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:57,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:57,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-28T07:23:57,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-28T07:23:57,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-28T07:23:57,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2130 sec 2024-11-28T07:23:57,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 1.2160 sec 2024-11-28T07:23:57,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:57,364 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:57,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112839c9db04227041f4b6c08e5fefaeffc5_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:57,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778697406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778697409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778697409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778697411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742571_1747 (size=14794) 2024-11-28T07:23:57,433 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:57,440 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112839c9db04227041f4b6c08e5fefaeffc5_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112839c9db04227041f4b6c08e5fefaeffc5_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:57,441 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c9491c9a1af14ce7aa0aa3358305e108, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:57,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c9491c9a1af14ce7aa0aa3358305e108 is 175, key is test_row_0/A:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:57,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742572_1748 (size=39749) 2024-11-28T07:23:57,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778697513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778697516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778697516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778697517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778697720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778697720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778697728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778697729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:57,851 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c9491c9a1af14ce7aa0aa3358305e108 2024-11-28T07:23:57,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/165afd09bea64906b50c79020179d156 is 50, key is test_row_0/B:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:57,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742573_1749 (size=12151) 2024-11-28T07:23:57,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/165afd09bea64906b50c79020179d156 2024-11-28T07:23:57,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/06a731c7891d43feaa96780d6eebb884 is 50, key is test_row_0/C:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:57,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742574_1750 (size=12151) 2024-11-28T07:23:57,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/06a731c7891d43feaa96780d6eebb884 2024-11-28T07:23:57,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/c9491c9a1af14ce7aa0aa3358305e108 as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108 2024-11-28T07:23:57,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108, entries=200, sequenceid=227, filesize=38.8 K 2024-11-28T07:23:57,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/165afd09bea64906b50c79020179d156 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156 2024-11-28T07:23:57,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156, entries=150, sequenceid=227, filesize=11.9 K 2024-11-28T07:23:57,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/06a731c7891d43feaa96780d6eebb884 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884 2024-11-28T07:23:58,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884, entries=150, sequenceid=227, filesize=11.9 K 2024-11-28T07:23:58,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a6b84436e6ee345d2d4f94cd524e48a2 in 641ms, sequenceid=227, compaction requested=true 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:23:58,005 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:23:58,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:23:58,005 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:58,006 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102403 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:58,006 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:23:58,006 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,006 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=100.0 K 2024-11-28T07:23:58,006 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,006 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108] 2024-11-28T07:23:58,007 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:58,007 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:23:58,007 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,007 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/09886e2624344ab696e907935b518b0d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=36.0 K 2024-11-28T07:23:58,007 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting db6104afa82b4e6e83c2315eeddeaf4a, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635924 2024-11-28T07:23:58,007 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 09886e2624344ab696e907935b518b0d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635924 2024-11-28T07:23:58,007 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c2235a8f8444bd09aa526d1facdba98, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778636098 2024-11-28T07:23:58,008 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting b84744d575e34e50b6d97977a62ad53b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778636098 2024-11-28T07:23:58,008 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9491c9a1af14ce7aa0aa3358305e108, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:23:58,008 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 
165afd09bea64906b50c79020179d156, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:23:58,026 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:58,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:58,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:58,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:58,036 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#640 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:58,037 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5a0b3d516a9c4779909cfeb252d52d27 is 50, key is test_row_0/B:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:58,038 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411288b4c6e9738d74fd28ffc9f73dd6e4e26_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:58,040 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411288b4c6e9738d74fd28ffc9f73dd6e4e26_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:58,040 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288b4c6e9738d74fd28ffc9f73dd6e4e26_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:58,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778698044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778698045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778698048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778698054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742575_1751 (size=12697) 2024-11-28T07:23:58,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280a9d4a53ff2c4aa490d4e390fe316afe_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778637404/Put/seqid=0 2024-11-28T07:23:58,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742576_1752 (size=4469) 2024-11-28T07:23:58,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742577_1753 (size=14794) 2024-11-28T07:23:58,152 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:58,156 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280a9d4a53ff2c4aa490d4e390fe316afe_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280a9d4a53ff2c4aa490d4e390fe316afe_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:58,157 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/10f9a06c26354072b56812dce1c7f014, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:58,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/10f9a06c26354072b56812dce1c7f014 is 175, key is test_row_0/A:col10/1732778637404/Put/seqid=0 2024-11-28T07:23:58,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778698153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778698154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778698155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778698157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742578_1754 (size=39749) 2024-11-28T07:23:58,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T07:23:58,235 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-28T07:23:58,237 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:23:58,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-11-28T07:23:58,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:23:58,239 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:23:58,240 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:23:58,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:23:58,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778698245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,250 DEBUG [Thread-2988 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:23:58,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:23:58,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778698359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778698361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778698361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778698365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:58,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:58,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:58,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,474 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5a0b3d516a9c4779909cfeb252d52d27 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5a0b3d516a9c4779909cfeb252d52d27 2024-11-28T07:23:58,479 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 5a0b3d516a9c4779909cfeb252d52d27(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:58,479 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:58,479 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778638005; duration=0sec 2024-11-28T07:23:58,479 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:23:58,479 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:23:58,479 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:23:58,481 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:23:58,481 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:23:58,481 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:58,481 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/3e37c3fe462f4da48cdb1653b12dad8b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=36.0 K 2024-11-28T07:23:58,481 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e37c3fe462f4da48cdb1653b12dad8b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732778635924 2024-11-28T07:23:58,482 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fae893d3228c47f39cb83f596f619269, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732778636098 2024-11-28T07:23:58,482 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 06a731c7891d43feaa96780d6eebb884, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:23:58,498 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#642 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:58,499 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/e92883d1494a4fa989ab9691f29f47b7 is 50, key is test_row_0/C:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:58,519 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#639 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:23:58,520 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/d409b89a61bc482f8ccdc17d0ef4ae95 is 175, key is test_row_0/A:col10/1732778637362/Put/seqid=0 2024-11-28T07:23:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:23:58,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:58,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:58,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742580_1756 (size=31651) 2024-11-28T07:23:58,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742579_1755 (size=12697) 2024-11-28T07:23:58,564 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/e92883d1494a4fa989ab9691f29f47b7 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e92883d1494a4fa989ab9691f29f47b7 2024-11-28T07:23:58,568 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/d409b89a61bc482f8ccdc17d0ef4ae95 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95 2024-11-28T07:23:58,569 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into e92883d1494a4fa989ab9691f29f47b7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:23:58,569 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:58,569 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778638005; duration=0sec 2024-11-28T07:23:58,569 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:58,569 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:23:58,574 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into d409b89a61bc482f8ccdc17d0ef4ae95(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:23:58,574 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:58,574 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778638005; duration=0sec 2024-11-28T07:23:58,574 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:23:58,574 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:23:58,623 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/10f9a06c26354072b56812dce1c7f014 2024-11-28T07:23:58,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/8a41336e0e2f45fab1af54187e0a178f is 50, key is test_row_0/B:col10/1732778637404/Put/seqid=0 2024-11-28T07:23:58,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778698665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778698667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778698667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:58,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778698671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:58,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742581_1757 (size=12151) 2024-11-28T07:23:58,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:58,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:58,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:58,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:58,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:23:58,852 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:58,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:58,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:58,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:58,853 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:59,013 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:59,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:59,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:59,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:59,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/8a41336e0e2f45fab1af54187e0a178f 2024-11-28T07:23:59,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/95f4c981a83b4225aa97d4cc3deaf6a6 is 50, key is test_row_0/C:col10/1732778637404/Put/seqid=0 2024-11-28T07:23:59,165 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:59,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:59,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742582_1758 (size=12151) 2024-11-28T07:23:59,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:59,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:59,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:59,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778699179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:59,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:59,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778699179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:59,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:59,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778699179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:59,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:23:59,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778699179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:23:59,331 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:59,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:59,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:59,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:23:59,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:23:59,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:59,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:23:59,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:23:59,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:23:59,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/95f4c981a83b4225aa97d4cc3deaf6a6 2024-11-28T07:23:59,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/10f9a06c26354072b56812dce1c7f014 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014 2024-11-28T07:23:59,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014, entries=200, sequenceid=252, filesize=38.8 K 2024-11-28T07:23:59,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/8a41336e0e2f45fab1af54187e0a178f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f 2024-11-28T07:23:59,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f, entries=150, sequenceid=252, filesize=11.9 K 2024-11-28T07:23:59,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/95f4c981a83b4225aa97d4cc3deaf6a6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6 2024-11-28T07:23:59,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6, entries=150, sequenceid=252, filesize=11.9 K 2024-11-28T07:23:59,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a6b84436e6ee345d2d4f94cd524e48a2 in 1579ms, sequenceid=252, compaction requested=false 2024-11-28T07:23:59,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:59,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
592d8b721726,33143,1732778474488 2024-11-28T07:23:59,639 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:23:59,639 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:23:59,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:23:59,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a2f8ef2cd5ee472286c679060d67e8fc_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778638049/Put/seqid=0 2024-11-28T07:23:59,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742583_1759 (size=12454) 2024-11-28T07:23:59,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,720 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128a2f8ef2cd5ee472286c679060d67e8fc_a6b84436e6ee345d2d4f94cd524e48a2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a2f8ef2cd5ee472286c679060d67e8fc_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:23:59,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/8da4adf4b0c6490aa3b44d7b9e822d71, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:23:59,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/8da4adf4b0c6490aa3b44d7b9e822d71 is 175, key is test_row_0/A:col10/1732778638049/Put/seqid=0 2024-11-28T07:23:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742584_1760 (size=31255) 2024-11-28T07:23:59,757 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=266, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/8da4adf4b0c6490aa3b44d7b9e822d71 2024-11-28T07:23:59,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/2ee2c2f6a768478ba1b075b950e629ca is 50, key is test_row_0/B:col10/1732778638049/Put/seqid=0 2024-11-28T07:23:59,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742585_1761 (size=12301) 2024-11-28T07:23:59,818 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/2ee2c2f6a768478ba1b075b950e629ca 2024-11-28T07:23:59,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c858e8293e2745889b7cf5fd8ca0ab9d is 50, key is test_row_0/C:col10/1732778638049/Put/seqid=0 2024-11-28T07:23:59,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742586_1762 (size=12301) 2024-11-28T07:23:59,886 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB 
at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c858e8293e2745889b7cf5fd8ca0ab9d 2024-11-28T07:23:59,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/8da4adf4b0c6490aa3b44d7b9e822d71 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71 2024-11-28T07:23:59,895 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71, entries=150, sequenceid=266, filesize=30.5 K 2024-11-28T07:23:59,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/2ee2c2f6a768478ba1b075b950e629ca as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca 2024-11-28T07:23:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,902 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca, entries=150, sequenceid=266, filesize=12.0 K 2024-11-28T07:23:59,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c858e8293e2745889b7cf5fd8ca0ab9d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d 2024-11-28T07:23:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,907 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d, entries=150, sequenceid=266, filesize=12.0 K 2024-11-28T07:23:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,908 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for a6b84436e6ee345d2d4f94cd524e48a2 in 269ms, sequenceid=266, compaction requested=true 2024-11-28T07:23:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:23:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:23:59,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-28T07:23:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-11-28T07:23:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-28T07:23:59,911 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6700 sec 2024-11-28T07:23:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 1.6750 sec 2024-11-28T07:23:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:23:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:00,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:00,240 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112877761030b19946f793c4cee7b5e8e254_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778640220/Put/seqid=0 
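
The records above show region a6b84436e6ee345d2d4f94cd524e48a2 flushing all three column families (A, B, C) to disk while the RPC handlers keep accepting writes. A flush like this can be requested through the standard Admin API; the following is only a minimal sketch of such a request, not code from this test. The connection setup is assumed, and only the table name (default:TestAcidGuarantees) is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        // Assumed setup: hbase-site.xml for this cluster is on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; in the log this
          // surfaces as a FlushTableProcedure and per-region "Flushing ..." records.
          admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
        }
      }
    }

Flushes are also triggered server-side when a region's memstore crosses its flush threshold, which is what the MemStoreFlusher.0 thread is doing in the records above.
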
2024-11-28T07:24:00,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742587_1763 (size=14994) 2024-11-28T07:24:00,286 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:00,291 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112877761030b19946f793c4cee7b5e8e254_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112877761030b19946f793c4cee7b5e8e254_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:00,292 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/32f13b2b62de4009b88e01f299e95e4d, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:00,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/32f13b2b62de4009b88e01f299e95e4d is 175, key is test_row_0/A:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:00,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778700298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778700300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778700303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778700304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742588_1764 (size=39949) 2024-11-28T07:24:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T07:24:00,343 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-28T07:24:00,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:24:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-11-28T07:24:00,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T07:24:00,347 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:24:00,347 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:24:00,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:24:00,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778700412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778700412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778700417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778700418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T07:24:00,500 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:00,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:00,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:00,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778700618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778700620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778700622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778700624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T07:24:00,653 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:00,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:00,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,730 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/32f13b2b62de4009b88e01f299e95e4d 2024-11-28T07:24:00,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/86bdc49f8b634c44ae5658dc4e867813 is 50, key is test_row_0/B:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:00,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742589_1765 (size=12301) 2024-11-28T07:24:00,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/86bdc49f8b634c44ae5658dc4e867813 2024-11-28T07:24:00,806 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:00,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7034c6719cab49ce8523123e3c5a02c1 is 50, key is test_row_0/C:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:00,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742590_1766 (size=12301) 2024-11-28T07:24:00,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7034c6719cab49ce8523123e3c5a02c1 2024-11-28T07:24:00,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/32f13b2b62de4009b88e01f299e95e4d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d 2024-11-28T07:24:00,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d, entries=200, sequenceid=277, filesize=39.0 K 2024-11-28T07:24:00,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/86bdc49f8b634c44ae5658dc4e867813 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813 2024-11-28T07:24:00,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T07:24:00,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7034c6719cab49ce8523123e3c5a02c1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1 2024-11-28T07:24:00,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T07:24:00,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for a6b84436e6ee345d2d4f94cd524e48a2 in 651ms, sequenceid=277, compaction requested=true 2024-11-28T07:24:00,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:00,881 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:00,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:24:00,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:00,881 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:00,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:24:00,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:00,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:24:00,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:00,883 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142604 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:00,883 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:00,883 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:24:00,883 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:24:00,883 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,883 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,883 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5a0b3d516a9c4779909cfeb252d52d27, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=48.3 K 2024-11-28T07:24:00,884 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=139.3 K 2024-11-28T07:24:00,884 INFO 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,884 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d] 2024-11-28T07:24:00,885 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a0b3d516a9c4779909cfeb252d52d27, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:24:00,886 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting d409b89a61bc482f8ccdc17d0ef4ae95, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:24:00,886 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10f9a06c26354072b56812dce1c7f014, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732778637404 2024-11-28T07:24:00,886 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a41336e0e2f45fab1af54187e0a178f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732778637404 2024-11-28T07:24:00,886 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8da4adf4b0c6490aa3b44d7b9e822d71, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732778638039 2024-11-28T07:24:00,886 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ee2c2f6a768478ba1b075b950e629ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732778638039 2024-11-28T07:24:00,887 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32f13b2b62de4009b88e01f299e95e4d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:00,887 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 86bdc49f8b634c44ae5658dc4e867813, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:00,915 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#651 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:00,916 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/3e31027b98024d4fb05452b8fc9e1446 is 50, key is test_row_0/B:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:00,918 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:00,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:00,930 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:00,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:00,936 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128d14b912badc043fc950020781059ea2f_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:00,938 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128d14b912badc043fc950020781059ea2f_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:00,938 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d14b912badc043fc950020781059ea2f_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:00,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742591_1767 (size=12983) 2024-11-28T07:24:00,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T07:24:00,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778700942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778700942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778700943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:00,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778700949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:00,956 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/3e31027b98024d4fb05452b8fc9e1446 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3e31027b98024d4fb05452b8fc9e1446 2024-11-28T07:24:00,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:00,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:00,964 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 3e31027b98024d4fb05452b8fc9e1446(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
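The repeated RegionTooBusyException entries above are the region server blocking Mutate calls because the memstore of region a6b84436e6ee345d2d4f94cd524e48a2 is over its 512.0 K blocking limit while a flush is still in flight: HRegion.checkResources throws, and CallRunner hands the exception back to the client. The stock HBase client retries this exception on its own, so the following is only a minimal, hypothetical sketch of an explicit retry with backoff against the table, family, and qualifier named in these entries (TestAcidGuarantees, A, col10); it is not the test code that produced this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put); // the call the log shows being rejected with RegionTooBusyException
          break;
        } catch (RegionTooBusyException e) {
          // Depending on client retry settings the exception may instead surface wrapped in a
          // RetriesExhaustedException; this branch assumes it arrives directly.
          if (attempt >= 10) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs); // wait for the in-flight flush to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}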
2024-11-28T07:24:00,964 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:00,964 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=12, startTime=1732778640881; duration=0sec 2024-11-28T07:24:00,964 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:00,964 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:24:00,964 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:00,966 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:00,967 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:24:00,967 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:00,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112887f9237501e145538c187ae10e43dd59_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778640929/Put/seqid=0 2024-11-28T07:24:00,967 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e92883d1494a4fa989ab9691f29f47b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=48.3 K 2024-11-28T07:24:00,967 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e92883d1494a4fa989ab9691f29f47b7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732778636740 2024-11-28T07:24:00,967 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 95f4c981a83b4225aa97d4cc3deaf6a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732778637404 2024-11-28T07:24:00,968 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting c858e8293e2745889b7cf5fd8ca0ab9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1732778638039 2024-11-28T07:24:00,968 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7034c6719cab49ce8523123e3c5a02c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:00,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742592_1768 (size=4469) 2024-11-28T07:24:01,013 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#654 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:01,013 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/44eda0d912d84309b93a7e9a7ac67194 is 50, key is test_row_0/C:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:01,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742593_1769 (size=14994) 2024-11-28T07:24:01,030 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,035 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112887f9237501e145538c187ae10e43dd59_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112887f9237501e145538c187ae10e43dd59_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:01,036 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43513a05b306413bb48ec9452237c254, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:01,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43513a05b306413bb48ec9452237c254 is 175, key is test_row_0/A:col10/1732778640929/Put/seqid=0 2024-11-28T07:24:01,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778701051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778701052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778701052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778701055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742594_1770 (size=12983) 2024-11-28T07:24:01,081 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/44eda0d912d84309b93a7e9a7ac67194 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44eda0d912d84309b93a7e9a7ac67194 2024-11-28T07:24:01,086 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 44eda0d912d84309b93a7e9a7ac67194(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
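The 512.0 K figure itself is the per-region blocking size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; once the region's memstore exceeds that product, checkResources rejects new writes until the flush drains it. The concrete TestAcidGuarantees settings are not visible in this excerpt, so the values below are only one illustrative combination that yields a 512 K limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-sized values; the real test configuration is not shown in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x that

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 524288 bytes = 512.0 K, the limit seen above
    System.out.println("blocking memstore limit = " + (blockingLimit / 1024.0) + " K");
  }
}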
2024-11-28T07:24:01,086 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:01,086 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=12, startTime=1732778640883; duration=0sec 2024-11-28T07:24:01,086 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:01,086 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:24:01,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742595_1771 (size=39949) 2024-11-28T07:24:01,088 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=305, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43513a05b306413bb48ec9452237c254 2024-11-28T07:24:01,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/58109c171be645a0a28820144f8ac83f is 50, key is test_row_0/B:col10/1732778640929/Put/seqid=0 2024-11-28T07:24:01,111 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:01,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:01,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:01,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742596_1772 (size=12301) 2024-11-28T07:24:01,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/58109c171be645a0a28820144f8ac83f 2024-11-28T07:24:01,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c9de82a550a4444d80d5312e2bc4889e is 50, key is test_row_0/C:col10/1732778640929/Put/seqid=0 2024-11-28T07:24:01,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742597_1773 (size=12301) 2024-11-28T07:24:01,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778701255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778701257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778701258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:01,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778701260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:01,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:01,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:01,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:01,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:01,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,392 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#652 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:01,392 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/f1de003d7a5d452ea26ae06ce98c38f6 is 175, key is test_row_0/A:col10/1732778640220/Put/seqid=0 2024-11-28T07:24:01,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742598_1774 (size=31937) 2024-11-28T07:24:01,419 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:01,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
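The failing pid=184 entries are the other half of the same picture: the master keeps re-dispatching FlushRegionCallable to 592d8b721726,33143, the region server answers "NOT flushing ... as already flushing" and reports the IOException back, and the master retries until the in-flight flush completes. From the caller's side this exchange sits behind the Admin flush API; a minimal, hypothetical sketch (not the test harness itself) is shown below, assuming this branch routes table flushes through a master procedure as the pid=183/184 entries suggest.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a flush of every region of the table. If a region is already flushing,
      // the remote procedure is retried on the server side until that flush completes,
      // which is the retry loop visible in the pid=184 entries of this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}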
2024-11-28T07:24:01,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:01,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183
2024-11-28T07:24:01,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:01,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778701560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:01,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:01,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778701560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:01,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:01,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778701563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:01,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:01,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778701565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:01,572 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:01,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184
2024-11-28T07:24:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:01,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] handler.RSProcedureHandler(58): pid=184
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:01,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=184
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:01,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=184
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:01,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c9de82a550a4444d80d5312e2bc4889e 2024-11-28T07:24:01,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/43513a05b306413bb48ec9452237c254 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254 2024-11-28T07:24:01,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254, entries=200, sequenceid=305, filesize=39.0 K 2024-11-28T07:24:01,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/58109c171be645a0a28820144f8ac83f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f 2024-11-28T07:24:01,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f, entries=150, sequenceid=305, filesize=12.0 K 2024-11-28T07:24:01,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/c9de82a550a4444d80d5312e2bc4889e as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e 2024-11-28T07:24:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e, entries=150, sequenceid=305, filesize=12.0 K 
2024-11-28T07:24:01,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for a6b84436e6ee345d2d4f94cd524e48a2 in 679ms, sequenceid=305, compaction requested=false 2024-11-28T07:24:01,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:01,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:01,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:01,726 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T07:24:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:01,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128605ddf7e38be40ee848f2040f482a86b_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_1/A:col10/1732778640941/Put/seqid=0 2024-11-28T07:24:01,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:01,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742599_1775 (size=9914) 2024-11-28T07:24:01,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,816 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/f1de003d7a5d452ea26ae06ce98c38f6 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6 2024-11-28T07:24:01,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,821 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into f1de003d7a5d452ea26ae06ce98c38f6(size=31.2 K), total size for store is 70.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:24:01,821 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:01,821 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=12, startTime=1732778640881; duration=0sec 2024-11-28T07:24:01,821 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:01,821 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:24:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:01,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:02,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:02,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:02,206 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128605ddf7e38be40ee848f2040f482a86b_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128605ddf7e38be40ee848f2040f482a86b_a6b84436e6ee345d2d4f94cd524e48a2
2024-11-28T07:24:02,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/872df396b9914bee88286f3463bb0b41, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2]
2024-11-28T07:24:02,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/872df396b9914bee88286f3463bb0b41 is 175, key is test_row_1/A:col10/1732778640941/Put/seqid=0
2024-11-28T07:24:02,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:02,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:02,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742600_1776 (size=22561)
2024-11-28T07:24:02,254 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/872df396b9914bee88286f3463bb0b41
2024-11-28T07:24:02,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/f7ec0545928f43b9bcc22335c0f66e12 is 50, key is test_row_1/B:col10/1732778640941/Put/seqid=0
2024-11-28T07:24:02,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37052 deadline: 1732778702283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,287 DEBUG [Thread-2988 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:24:02,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742601_1777 (size=9857) 2024-11-28T07:24:02,305 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/f7ec0545928f43b9bcc22335c0f66e12 2024-11-28T07:24:02,321 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7139f8842a2a43789f2effc52803ca47 is 50, key is test_row_1/C:col10/1732778640941/Put/seqid=0 2024-11-28T07:24:02,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742602_1778 (size=9857) 2024-11-28T07:24:02,350 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7139f8842a2a43789f2effc52803ca47 2024-11-28T07:24:02,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/872df396b9914bee88286f3463bb0b41 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41 2024-11-28T07:24:02,360 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41, entries=100, sequenceid=315, filesize=22.0 K 2024-11-28T07:24:02,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/f7ec0545928f43b9bcc22335c0f66e12 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12 2024-11-28T07:24:02,368 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12, entries=100, sequenceid=315, filesize=9.6 K 2024-11-28T07:24:02,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7139f8842a2a43789f2effc52803ca47 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47 2024-11-28T07:24:02,375 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47, entries=100, sequenceid=315, filesize=9.6 K 2024-11-28T07:24:02,376 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for a6b84436e6ee345d2d4f94cd524e48a2 in 650ms, sequenceid=315, compaction requested=true 2024-11-28T07:24:02,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:02,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:02,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-28T07:24:02,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-11-28T07:24:02,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-28T07:24:02,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0300 sec 2024-11-28T07:24:02,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 2.0340 sec 2024-11-28T07:24:02,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:02,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:02,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285ef0c599dc484aa1a799ce215c61f185_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is 
test_row_0/A:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T07:24:02,451 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-28T07:24:02,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:24:02,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-11-28T07:24:02,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,454 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:24:02,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=185 2024-11-28T07:24:02,454 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:24:02,455 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:24:02,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742603_1779 (size=12454) 2024-11-28T07:24:02,484 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,490 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285ef0c599dc484aa1a799ce215c61f185_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ef0c599dc484aa1a799ce215c61f185_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:02,492 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9df510064ce4483fa0864a7eee3597eb, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9df510064ce4483fa0864a7eee3597eb is 175, key is test_row_0/A:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742604_1780 (size=31255) 2024-11-28T07:24:02,539 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=345, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9df510064ce4483fa0864a7eee3597eb 2024-11-28T07:24:02,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T07:24:02,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/154badcf202a4eb7aec5d89e10c6e7b4 is 50, key is test_row_0/B:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742605_1781 (size=12301) 2024-11-28T07:24:02,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/154badcf202a4eb7aec5d89e10c6e7b4 2024-11-28T07:24:02,607 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:02,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-28T07:24:02,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:02,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:02,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:02,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] handler.RSProcedureHandler(58): pid=186 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:02,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=186 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:02,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=186 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:02,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/43e89f21c7bf4094ba7abe757088911b is 50, key is test_row_0/C:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742606_1782 (size=12301) 2024-11-28T07:24:02,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=345 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/43e89f21c7bf4094ba7abe757088911b 2024-11-28T07:24:02,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9df510064ce4483fa0864a7eee3597eb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb 2024-11-28T07:24:02,672 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb, entries=150, sequenceid=345, filesize=30.5 K 2024-11-28T07:24:02,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/154badcf202a4eb7aec5d89e10c6e7b4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4 2024-11-28T07:24:02,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4, entries=150, sequenceid=345, filesize=12.0 K 2024-11-28T07:24:02,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/43e89f21c7bf4094ba7abe757088911b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b 2024-11-28T07:24:02,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b, entries=150, sequenceid=345, filesize=12.0 K 2024-11-28T07:24:02,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for a6b84436e6ee345d2d4f94cd524e48a2 in 258ms, sequenceid=345, compaction requested=true 2024-11-28T07:24:02,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,697 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:02,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:24:02,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:02,698 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:02,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:24:02,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:02,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:24:02,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:02,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:02,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:24:02,699 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
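The entries above capture the hand-off from flush to compaction: the finished flush leaves store A with 4 HFiles, MemStoreFlusher marks A, B and C for compaction, and SortedCompactionPolicy/ExploringCompactionPolicy then decides which of the 4 eligible files to merge by trying contiguous permutations of the file list and keeping a set whose sizes satisfy the configured ratio (here all 4 files, 125702 bytes in total). The sketch below is only a minimal illustration of that ratio-based selection idea, not the actual ExploringCompactionPolicy code; the class name, method names, ratio value and byte sizes are illustrative.

```java
import java.util.List;

// Hypothetical, simplified stand-in for the "exploring" selection logged above:
// scan contiguous windows of the store file sizes and keep the window with the
// most files in which every file satisfies fileSize <= ratio * sum(other files).
final class RatioCompactionSketch {
  static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
    List<Long> best = List.of();
    for (int start = 0; start < sizes.size(); start++) {
      for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
        List<Long> window = sizes.subList(start, end);
        if (satisfiesRatio(window, ratio) && window.size() > best.size()) {
          best = window; // prefer the selection that compacts the most files
        }
      }
    }
    return best;
  }

  private static boolean satisfiesRatio(List<Long> window, double ratio) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    // Every file must be "small enough" relative to the rest of the candidate window.
    return window.stream().allMatch(size -> size <= ratio * (total - size));
  }

  public static void main(String[] args) {
    // Approximate byte sizes of the four A-family files from the log (31.2 K, 39.0 K, 22.0 K, 30.5 K).
    List<Long> aFiles = List.of(31_949L, 39_936L, 22_528L, 31_232L);
    System.out.println(select(aFiles, 1.2, 2, 10));
  }
}
```

With the approximate sizes of the four A-family files, every file easily passes the ratio check, which is consistent with the policy settling on an "all files" minor compaction as logged above.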
2024-11-28T07:24:02,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,699 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=122.8 K 2024-11-28T07:24:02,699 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:02,699 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb] 2024-11-28T07:24:02,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:02,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1de003d7a5d452ea26ae06ce98c38f6, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:02,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:24:02,700 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
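A small cross-check on the numbers: the totalSize reported for each selection (122.8 K for family A above, 46.3 K for family B in the entries just below) is simply the sum of the per-file sizes printed by the Compactor(224) lines. The class name below is made up for the check; the sizes are taken straight from the log.

```java
import java.util.Arrays;

// Sanity check of the logged totalSize values: they are just the sum of the
// per-file sizes printed by the Compactor(224) entries (values in KB).
public class TotalSizeCheck {
  public static void main(String[] args) {
    double[] familyA = {31.2, 39.0, 22.0, 30.5}; // f1de00..., 43513a..., 872df3..., 9df510...
    double[] familyB = {12.7, 12.0, 9.6, 12.0};  // 3e3102..., 58109c..., f7ec05..., 154bad...
    System.out.printf("A: %.1f K%n", Arrays.stream(familyA).sum()); // 122.7 K vs. logged 122.8 K (per-file rounding)
    System.out.printf("B: %.1f K%n", Arrays.stream(familyB).sum()); // 46.3 K, matches the log
  }
}
```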
2024-11-28T07:24:02,700 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3e31027b98024d4fb05452b8fc9e1446, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=46.3 K 2024-11-28T07:24:02,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,700 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43513a05b306413bb48ec9452237c254, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732778640286 2024-11-28T07:24:02,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e31027b98024d4fb05452b8fc9e1446, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:02,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 58109c171be645a0a28820144f8ac83f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732778640286 2024-11-28T07:24:02,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 872df396b9914bee88286f3463bb0b41, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732778640941 2024-11-28T07:24:02,701 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ec0545928f43b9bcc22335c0f66e12, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732778640941 2024-11-28T07:24:02,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9df510064ce4483fa0864a7eee3597eb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:02,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,703 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 154badcf202a4eb7aec5d89e10c6e7b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:02,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,714 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,718 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#664 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:02,718 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5fe47464a7e34cf098d9645c6c8b8bd8 is 50, key is test_row_0/B:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,731 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128fe70195ace4246b7a0fa8409e6501b05_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,733 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128fe70195ace4246b7a0fa8409e6501b05_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,733 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128fe70195ace4246b7a0fa8409e6501b05_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,734 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,740 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,758 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,760 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-11-28T07:24:02,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T07:24:02,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
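pid=186 above is a master-dispatched remote procedure: RSProcedureDispatcher opens an admin connection to 592d8b721726,33143, RSRpcServices executes FlushRegionCallable on the region server, and in parallel the master keeps answering "is procedure done" polls for pid=185, presumably the parent table-level flush procedure. The log does not show who requested the flush; a minimal sketch of how such a flush is typically triggered from the client side, assuming it came through the Admin API:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of a client-requested flush. Admin#flush asks the master to run a
// flush procedure, which dispatches per-region work (the FlushRegionCallable seen
// above as pid=186) to the region server, while the caller waits for completion.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Admin#flush is synchronous, so the client keeps polling the master until the procedure reports done, which matches the repeated "Checking to see if procedure is done pid=185" entries.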
2024-11-28T07:24:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,764 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:02,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:02,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742607_1783 (size=13119) 2024-11-28T07:24:02,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,805 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742608_1784 (size=4469) 2024-11-28T07:24:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,812 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#663 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,813 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4009fd9246844037b234063d865b14ac is 175, key is test_row_0/A:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742609_1785 (size=32073) 2024-11-28T07:24:02,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112882958e7a9f5f4c10a20c9ac354bd2eec_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778642446/Put/seqid=0 2024-11-28T07:24:02,856 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4009fd9246844037b234063d865b14ac as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac 2024-11-28T07:24:02,861 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into 4009fd9246844037b234063d865b14ac(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:24:02,861 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:02,861 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=12, startTime=1732778642697; duration=0sec 2024-11-28T07:24:02,861 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:02,861 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:24:02,861 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:02,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,863 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:02,863 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:24:02,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,863 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:02,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,863 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44eda0d912d84309b93a7e9a7ac67194, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=46.3 K 2024-11-28T07:24:02,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44eda0d912d84309b93a7e9a7ac67194, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732778640220 2024-11-28T07:24:02,864 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9de82a550a4444d80d5312e2bc4889e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732778640286 2024-11-28T07:24:02,866 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7139f8842a2a43789f2effc52803ca47, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1732778640941 2024-11-28T07:24:02,866 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43e89f21c7bf4094ba7abe757088911b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:02,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742610_1786 (size=9914) 2024-11-28T07:24:02,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:02,893 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112882958e7a9f5f4c10a20c9ac354bd2eec_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112882958e7a9f5f4c10a20c9ac354bd2eec_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:02,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/0ce6df7280b1438fa45a17267e2d3d18, store: [table=TestAcidGuarantees family=A 
region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:02,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/0ce6df7280b1438fa45a17267e2d3d18 is 175, key is test_row_0/A:col10/1732778642446/Put/seqid=0 2024-11-28T07:24:02,898 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#666 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:02,898 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/59b8357826164ff7948b0b8d5244dbbf is 50, key is test_row_0/C:col10/1732778642118/Put/seqid=0 2024-11-28T07:24:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742611_1787 (size=22561) 2024-11-28T07:24:02,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778702952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742612_1788 (size=13119) 2024-11-28T07:24:02,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778702964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778702964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:02,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778702965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T07:24:03,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778703160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778703174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,189 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5fe47464a7e34cf098d9645c6c8b8bd8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5fe47464a7e34cf098d9645c6c8b8bd8 2024-11-28T07:24:03,194 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 5fe47464a7e34cf098d9645c6c8b8bd8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:24:03,194 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:03,194 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=12, startTime=1732778642698; duration=0sec 2024-11-28T07:24:03,194 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:03,194 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:24:03,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778703196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778703212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,343 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=352, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/0ce6df7280b1438fa45a17267e2d3d18 2024-11-28T07:24:03,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1c1e20cb27c04c6982abc73128c8f9b5 is 50, key is test_row_0/B:col10/1732778642446/Put/seqid=0 2024-11-28T07:24:03,371 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/59b8357826164ff7948b0b8d5244dbbf as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/59b8357826164ff7948b0b8d5244dbbf 2024-11-28T07:24:03,375 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 59b8357826164ff7948b0b8d5244dbbf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:24:03,375 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:03,375 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=12, startTime=1732778642699; duration=0sec 2024-11-28T07:24:03,375 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:03,375 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:24:03,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742613_1789 (size=9857) 2024-11-28T07:24:03,392 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1c1e20cb27c04c6982abc73128c8f9b5 2024-11-28T07:24:03,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/4b1c1857537c4e8cbc774ee5b113f051 is 50, key is test_row_0/C:col10/1732778642446/Put/seqid=0 2024-11-28T07:24:03,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742614_1790 (size=9857) 2024-11-28T07:24:03,436 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/4b1c1857537c4e8cbc774ee5b113f051 2024-11-28T07:24:03,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/0ce6df7280b1438fa45a17267e2d3d18 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18 2024-11-28T07:24:03,444 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18, entries=100, sequenceid=352, filesize=22.0 K 2024-11-28T07:24:03,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 
{event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1c1e20cb27c04c6982abc73128c8f9b5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5 2024-11-28T07:24:03,451 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5, entries=100, sequenceid=352, filesize=9.6 K 2024-11-28T07:24:03,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/4b1c1857537c4e8cbc774ee5b113f051 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051 2024-11-28T07:24:03,457 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051, entries=100, sequenceid=352, filesize=9.6 K 2024-11-28T07:24:03,458 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=181.14 KB/185490 for a6b84436e6ee345d2d4f94cd524e48a2 in 694ms, sequenceid=352, compaction requested=false 2024-11-28T07:24:03,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:03,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:03,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-28T07:24:03,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-11-28T07:24:03,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-11-28T07:24:03,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0050 sec 2024-11-28T07:24:03,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 1.0090 sec 2024-11-28T07:24:03,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:03,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:03,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778703480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778703482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288d8851f19e224fe5a4cb5037905a506e_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:03,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778703503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778703519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742615_1791 (size=14994) 2024-11-28T07:24:03,543 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:03,547 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288d8851f19e224fe5a4cb5037905a506e_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288d8851f19e224fe5a4cb5037905a506e_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:03,548 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/df135a9870e7492b93948ddbe05e2243, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:03,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/df135a9870e7492b93948ddbe05e2243 is 175, key is test_row_0/A:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:03,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T07:24:03,565 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-28T07:24:03,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T07:24:03,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-11-28T07:24:03,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-28T07:24:03,571 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T07:24:03,571 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T07:24:03,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T07:24:03,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742616_1792 (size=39949) 2024-11-28T07:24:03,585 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=386, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/df135a9870e7492b93948ddbe05e2243 2024-11-28T07:24:03,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778703585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:03,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778703587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:03,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dca13fd1ea37441c850279ca513e6df3 is 50, key is test_row_0/B:col10/1732778642849/Put/seqid=0
2024-11-28T07:24:03,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742617_1793 (size=12301)
2024-11-28T07:24:03,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187
2024-11-28T07:24:03,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:03,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:03,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:03,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:03,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:03,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:03,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:03,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:03,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778703790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:03,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778703793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:03,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-28T07:24:03,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:03,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:03,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:03,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:03,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:03,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:03,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:03,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778704012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778704025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,034 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
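Each rejected Mutate surfaces to the client as the RegionTooBusyException recorded by ipc.CallRunner above; the stock HBase client retries these on its own according to hbase.client.retries.number and hbase.client.pause. Purely as an illustration (not code from this test), a caller using the plain Table API could also back off explicitly; the row, family and qualifier below mirror the test_row_0/B:col10 keys in the log, while the value is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);          // write accepted
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(pauseMs);   // memstore over its blocking limit; wait for the flush
          pauseMs *= 2;            // simple exponential backoff
        }
      }
    }
  }
}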
2024-11-28T07:24:04,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dca13fd1ea37441c850279ca513e6df3 2024-11-28T07:24:04,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fbf8b37cd2a0424da7e654095b603f98 is 50, key is test_row_0/C:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:04,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778704093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778704097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742618_1794 (size=12301) 2024-11-28T07:24:04,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-28T07:24:04,188 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,341 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,494 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=386 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fbf8b37cd2a0424da7e654095b603f98 2024-11-28T07:24:04,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/df135a9870e7492b93948ddbe05e2243 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243 2024-11-28T07:24:04,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243, entries=200, sequenceid=386, filesize=39.0 K 2024-11-28T07:24:04,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/dca13fd1ea37441c850279ca513e6df3 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3 2024-11-28T07:24:04,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,536 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3, entries=150, sequenceid=386, filesize=12.0 K 2024-11-28T07:24:04,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fbf8b37cd2a0424da7e654095b603f98 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98 2024-11-28T07:24:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98, entries=150, sequenceid=386, filesize=12.0 K 2024-11-28T07:24:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for a6b84436e6ee345d2d4f94cd524e48a2 in 1073ms, sequenceid=386, compaction requested=true 2024-11-28T07:24:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,548 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:04,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,549 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:04,549 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:24:04,549 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
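At 07:24:04,547 the flush that the master has been polling for (pid=187/188) finally completes: ~187.85 KB lands across the A, B and C stores at sequenceid=386 with compaction requested=true, after which the short-compactions thread selects a minor compaction of the three A store files. The same flush-then-compact sequence can also be requested through the Admin API; the calls below are a sketch of that administrative equivalent, not what the test itself executes (the test drives FlushTableProcedure through the master):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenCompactSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);    // roughly what FlushTableProcedure pid=187 drives server-side
      admin.compact(table);  // request a minor compaction of every store
    }
  }
}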
2024-11-28T07:24:04,549 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=92.4 K 2024-11-28T07:24:04,549 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,549 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243] 2024-11-28T07:24:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,550 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4009fd9246844037b234063d865b14ac, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:04,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,550 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ce6df7280b1438fa45a17267e2d3d18, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778642445 2024-11-28T07:24:04,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,551 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting df135a9870e7492b93948ddbe05e2243, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, 
seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:04,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,553 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:24:04,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:04,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:04,560 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:04,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:24:04,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:04,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:24:04,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:04,561 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:24:04,561 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:04,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,562 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5fe47464a7e34cf098d9645c6c8b8bd8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=34.5 K 2024-11-28T07:24:04,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,563 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fe47464a7e34cf098d9645c6c8b8bd8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,563 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1e20cb27c04c6982abc73128c8f9b5, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778642445 2024-11-28T07:24:04,563 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting dca13fd1ea37441c850279ca513e6df3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,568 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:04,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,581 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112893532ed6535d49c2896e9d5c8dda08ef_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:04,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,585 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112893532ed6535d49c2896e9d5c8dda08ef_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:04,585 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112893532ed6535d49c2896e9d5c8dda08ef_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:04,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,589 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#673 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:04,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,589 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5048ecfb546e4b20bcd8c4e8a26cde25 is 50, key is test_row_0/B:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:04,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:04,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:04,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-28T07:24:04,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742619_1795 (size=4469) 2024-11-28T07:24:04,639 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#672 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:04,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,640 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/67b1b0b9788743239f660177bfb99e0f is 175, key is test_row_0/A:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:04,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:04,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:04,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:04,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:04,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:04,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:04,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742620_1796 (size=13221) 2024-11-28T07:24:04,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128011a08955c5445d18a15d561b481df43_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778644625/Put/seqid=0 2024-11-28T07:24:04,667 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/5048ecfb546e4b20bcd8c4e8a26cde25 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5048ecfb546e4b20bcd8c4e8a26cde25 2024-11-28T07:24:04,672 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 5048ecfb546e4b20bcd8c4e8a26cde25(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
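The flush entries above route family A through the MOB path (note the mobdir/.tmp file names handled by DefaultMobStoreFlusher), so this run of TestAcidGuarantees is using a MOB-enabled 'A' family. As a minimal sketch only, assuming a plain HBase 2.x client on the classpath, a table with a MOB-enabled family can be declared as below; the 100-byte MOB threshold is an arbitrary illustration, not a value taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family "A" stores large cells as MOB files under mobdir, like the paths in the log.
      // The 100-byte threshold is illustrative only.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(100L)
                  .build());
      admin.createTable(table.build());
    }
  }
}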
2024-11-28T07:24:04,672 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:04,673 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778644560; duration=0sec 2024-11-28T07:24:04,673 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:04,673 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:24:04,673 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:04,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-28T07:24:04,675 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:04,675 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:24:04,675 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:04,675 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/59b8357826164ff7948b0b8d5244dbbf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=34.5 K 2024-11-28T07:24:04,675 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 59b8357826164ff7948b0b8d5244dbbf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=345, earliestPutTs=1732778642118 2024-11-28T07:24:04,676 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b1c1857537c4e8cbc774ee5b113f051, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1732778642445 2024-11-28T07:24:04,676 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fbf8b37cd2a0424da7e654095b603f98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:04,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742621_1797 (size=32175) 2024-11-28T07:24:04,694 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#675 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:04,695 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/d53994383a08405689d049c145c19296 is 50, key is test_row_0/C:col10/1732778642849/Put/seqid=0 2024-11-28T07:24:04,695 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/67b1b0b9788743239f660177bfb99e0f as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f 2024-11-28T07:24:04,700 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into 67b1b0b9788743239f660177bfb99e0f(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
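The selection logged above (three eligible files of 12.8 K, 9.6 K and 12.0 K picked for a minor compaction of store C) comes from the exploring/ratio-based compaction policy. The following is only a rough illustration of the size-ratio idea, not HBase's actual implementation: the 1.2 ratio is the documented default for hbase.hstore.compaction.ratio, and the class and method names are invented for the example.

import java.util.List;

public final class CompactionRatioSketch {
  // Illustrative only: a candidate selection passes the ratio test if no file in it
  // is much larger than the rest of the selection combined (size <= ratio * sumOfOthers).
  static boolean selectionWithinRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the log: 12.8 K, 9.6 K and 12.0 K.
    List<Long> sizes = List.of(13108L, 9830L, 12288L);
    System.out.println(selectionWithinRatio(sizes, 1.2)); // true -> compacted together
  }
}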
2024-11-28T07:24:04,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:04,701 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778644548; duration=0sec 2024-11-28T07:24:04,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:04,701 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:24:04,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778704712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778704714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742622_1798 (size=20074) 2024-11-28T07:24:04,734 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:04,738 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128011a08955c5445d18a15d561b481df43_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128011a08955c5445d18a15d561b481df43_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:04,740 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/1df5a2381be042889754435c5833f463, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:04,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/1df5a2381be042889754435c5833f463 is 175, key is test_row_0/A:col10/1732778644625/Put/seqid=0 2024-11-28T07:24:04,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742623_1799 (size=13221) 2024-11-28T07:24:04,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742624_1800 (size=57333) 2024-11-28T07:24:04,784 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/1df5a2381be042889754435c5833f463 2024-11-28T07:24:04,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/bab8d7edd571448d8268021472dbdf5e is 50, key is test_row_0/B:col10/1732778644625/Put/seqid=0 2024-11-28T07:24:04,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778704816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778704819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:04,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742625_1801 (size=12301) 2024-11-28T07:24:04,953 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:04,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:04,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:04,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:04,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:04,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:04,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778705019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778705020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778705024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778705030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,106 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:05,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:05,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:05,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,161 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/d53994383a08405689d049c145c19296 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/d53994383a08405689d049c145c19296 2024-11-28T07:24:05,169 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into d53994383a08405689d049c145c19296(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:24:05,169 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:05,169 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778644560; duration=0sec 2024-11-28T07:24:05,169 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:05,169 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:24:05,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/bab8d7edd571448d8268021472dbdf5e 2024-11-28T07:24:05,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fa2743a7c90044f898e6f5955e6236b1 is 50, key is test_row_0/C:col10/1732778644625/Put/seqid=0 2024-11-28T07:24:05,266 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:05,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 
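The repeated RegionTooBusyException responses in this stretch ("Over memstore limit=512.0 K") are the region server pushing back on writers while flushes and compactions catch up. The standard HBase client retries these internally (and may surface them wrapped in a retries-exhausted exception), but as a minimal sketch of explicit application-side backoff, assuming an HBase 2.x client, with the table, row and column names copied from the log and arbitrary retry counts and sleep times:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;              // arbitrary starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                         // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait and retry.
          // In practice the client may instead throw a wrapped retry exception.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}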
2024-11-28T07:24:05,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:05,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:05,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:05,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742626_1802 (size=12301) 2024-11-28T07:24:05,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fa2743a7c90044f898e6f5955e6236b1 2024-11-28T07:24:05,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/1df5a2381be042889754435c5833f463 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463 2024-11-28T07:24:05,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463, entries=300, sequenceid=397, filesize=56.0 K 2024-11-28T07:24:05,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/bab8d7edd571448d8268021472dbdf5e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e 2024-11-28T07:24:05,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e, entries=150, sequenceid=397, filesize=12.0 K 2024-11-28T07:24:05,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/fa2743a7c90044f898e6f5955e6236b1 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1 2024-11-28T07:24:05,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1, entries=150, sequenceid=397, filesize=12.0 K 2024-11-28T07:24:05,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a6b84436e6ee345d2d4f94cd524e48a2 in 672ms, sequenceid=397, compaction requested=false 2024-11-28T07:24:05,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:05,327 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 
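The 512.0 K limit quoted in the RegionTooBusyException warnings is the per-region blocking threshold, i.e. the memstore flush size multiplied by the block multiplier; this test clearly runs with a much smaller flush size than the production default of 128 MB. The exact values the test uses are not shown in the log, so the numbers below are only an illustration of the two configuration keys involved, chosen to multiply to 512 K.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (default 128 MB); 128 KB here purely for illustration.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are rejected once the memstore reaches flush.size * multiplier (default 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" seen in the log.
    System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", -1));
  }
}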
2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:05,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:05,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778705342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778705343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128188795f79096459abdcb2763e97f8075_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742627_1803 (size=12454) 2024-11-28T07:24:05,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:05,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:05,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:05,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
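The RegionTooBusyException entries above are HRegion.checkResources rejecting new mutations once the region's memstore has crossed its blocking limit (512.0 K in this test configuration) while the flush is still draining it. The stock HBase client treats this as a retriable condition and backs off on its own; the sketch below just makes that loop explicit, reusing the table, row and family names from the log (the class name, retry count and backoff values are illustrative assumptions):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                       // assumed starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                         // write accepted
              break;
            } catch (IOException e) {
              // A RegionTooBusyException (possibly wrapped by the client's own
              // retry layer) means the memstore is over its blocking limit;
              // give the flush time to drain it, then retry with backoff.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }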
2024-11-28T07:24:05,420 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:05,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,428 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128188795f79096459abdcb2763e97f8075_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128188795f79096459abdcb2763e97f8075_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:05,430 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4fd9740d68e54e43a776268b8727dfe2, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:05,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4fd9740d68e54e43a776268b8727dfe2 is 175, key is test_row_0/A:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742628_1804 (size=31255) 2024-11-28T07:24:05,439 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=426, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4fd9740d68e54e43a776268b8727dfe2 2024-11-28T07:24:05,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778705447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:05,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778705447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:05,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/596179225c6e46bba1f0c24bcb78fdc8 is 50, key is test_row_0/B:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742629_1805 (size=12301) 2024-11-28T07:24:05,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/596179225c6e46bba1f0c24bcb78fdc8 2024-11-28T07:24:05,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/27b0cf419d424311b41f1f2baee76fb4 is 50, key is test_row_0/C:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742630_1806 (size=12301) 2024-11-28T07:24:05,556 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/27b0cf419d424311b41f1f2baee76fb4 2024-11-28T07:24:05,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/4fd9740d68e54e43a776268b8727dfe2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2 2024-11-28T07:24:05,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2, entries=150, sequenceid=426, filesize=30.5 K 
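The "Over memstore limit=512.0 K" figure in the exceptions above is the per-region blocking threshold, derived from the configured memstore flush size times the block multiplier; the tiny value here is presumably a deliberately tight TestAcidGuarantees setting so that flushes and write blocking happen constantly. For reference, the two standard server-side keys involved (normally set in hbase-site.xml; the values below are ordinary illustrative defaults, not the test's settings) are:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        // Server-side keys shown programmatically only to keep the example in
        // one language; in a real cluster they belong in hbase-site.xml.
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Block new writes once the memstore reaches flush.size * multiplier,
        // the limit behind the "Over memstore limit" warnings above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
      }
    }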
2024-11-28T07:24:05,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:05,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/596179225c6e46bba1f0c24bcb78fdc8 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8 2024-11-28T07:24:05,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:05,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:05,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
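The CompactingMemStore and CompactionPipeline messages earlier in this section ("FLUSHING TO DISK", "Swapping pipeline suffix") come from the in-memory-compaction memstore implementation rather than the plain default memstore; that choice is made per column family (or cluster-wide through hbase.hregion.compacting.memstore.type). A hedged sketch of declaring such a family, with the policy value picked purely for illustration:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionFamily {
      public static void main(String[] args) {
        // A compacting memstore keeps segments in an in-memory pipeline;
        // swapping that pipeline at flush time is what logs
        // "Swapping pipeline suffix" in the entries above.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .build();
        System.out.println(cf);
      }
    }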
2024-11-28T07:24:05,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8, entries=150, sequenceid=426, filesize=12.0 K 2024-11-28T07:24:05,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/27b0cf419d424311b41f1f2baee76fb4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4 2024-11-28T07:24:05,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4, entries=150, sequenceid=426, filesize=12.0 K 2024-11-28T07:24:05,584 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for a6b84436e6ee345d2d4f94cd524e48a2 in 257ms, sequenceid=426, compaction requested=true 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:05,584 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:24:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:24:05,584 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:05,587 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:05,587 DEBUG 
[RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:24:05,587 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,588 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5048ecfb546e4b20bcd8c4e8a26cde25, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=36.9 K 2024-11-28T07:24:05,588 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120763 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:05,588 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:24:05,588 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,588 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=117.9 K 2024-11-28T07:24:05,588 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,588 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2] 2024-11-28T07:24:05,588 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5048ecfb546e4b20bcd8c4e8a26cde25, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:05,589 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 67b1b0b9788743239f660177bfb99e0f, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:05,592 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting bab8d7edd571448d8268021472dbdf5e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732778644611 2024-11-28T07:24:05,593 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1df5a2381be042889754435c5833f463, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732778643480 2024-11-28T07:24:05,593 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 596179225c6e46bba1f0c24bcb78fdc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:05,594 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fd9740d68e54e43a776268b8727dfe2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:05,610 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#681 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:05,611 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/a8d8502a9cf14e899f55b8067b3ebda5 is 50, key is test_row_0/B:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,625 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:05,648 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128cbe92751a3894ab8b1eaf445309fe490_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:05,650 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128cbe92751a3894ab8b1eaf445309fe490_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:05,650 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cbe92751a3894ab8b1eaf445309fe490_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:05,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T07:24:05,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742632_1808 (size=4469) 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:05,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742631_1807 (size=13323) 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:05,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:05,665 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#682 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:05,665 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/6dfa4d2fd3484e38a67da2389d6038af is 175, key is test_row_0/A:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-11-28T07:24:05,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b550da4f62c54bffb58fe934095a2db2_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778645330/Put/seqid=0 2024-11-28T07:24:05,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742633_1809 (size=32277) 2024-11-28T07:24:05,685 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/6dfa4d2fd3484e38a67da2389d6038af as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af 2024-11-28T07:24:05,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742634_1810 (size=12454) 2024-11-28T07:24:05,726 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:05,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:05,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:05,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
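The mobdir/.tmp paths plus the HMobStore, DefaultMobStoreFlusher and DefaultMobStoreCompactor entries above indicate that family A is MOB-enabled, so cells over the MOB threshold are written to separate files under mobdir and only referenced from the regular store files. A rough sketch of declaring such a family (the threshold is an assumed illustrative value, not the one this test uses):

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobEnabledFamily {
      public static void main(String[] args) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)       // cells at or above the threshold go to mobdir/
            .setMobThreshold(100L)     // assumed threshold in bytes, illustration only
            .build();
        System.out.println(cf);
      }
    }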
2024-11-28T07:24:05,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:05,736 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into 6dfa4d2fd3484e38a67da2389d6038af(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
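The compaction entries above show the ExploringCompactionPolicy selecting all three eligible store files per family (three files being the usual minimum that makes a minor compaction worthwhile) and the PressureAwareThroughputController capping the rewrite at 50.00 MB/second. Both knobs are ordinary server-side configuration, and a compaction can also be requested explicitly through the Admin API; a sketch with illustrative values (not this test's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionKnobs {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Server-side keys, normally set in hbase-site.xml; shown here only to
        // keep the example in one language.
        // Minimum number of store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Upper bound (bytes/sec) for the pressure-aware compaction throttle.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Explicitly request a major compaction of the table from this log.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }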
2024-11-28T07:24:05,736 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:05,736 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=13, startTime=1732778645584; duration=0sec 2024-11-28T07:24:05,736 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:05,736 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:24:05,736 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T07:24:05,739 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T07:24:05,739 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:24:05,739 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:05,739 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/d53994383a08405689d049c145c19296, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=36.9 K 2024-11-28T07:24:05,739 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting d53994383a08405689d049c145c19296, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=386, earliestPutTs=1732778642842 2024-11-28T07:24:05,740 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting fa2743a7c90044f898e6f5955e6236b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1732778644611 2024-11-28T07:24:05,740 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 27b0cf419d424311b41f1f2baee76fb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:05,753 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#684 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:05,754 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/0a80c7436ae741028c0c3b0a70269782 is 50, key is test_row_0/C:col10/1732778645325/Put/seqid=0 2024-11-28T07:24:05,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742635_1811 (size=13323) 2024-11-28T07:24:05,800 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/0a80c7436ae741028c0c3b0a70269782 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0a80c7436ae741028c0c3b0a70269782 2024-11-28T07:24:05,805 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 0a80c7436ae741028c0c3b0a70269782(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:24:05,805 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:05,805 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=13, startTime=1732778645584; duration=0sec 2024-11-28T07:24:05,805 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:05,805 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:24:05,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778705813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:05,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:05,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778705815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:05,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:05,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:05,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:05,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:05,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:05,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:05,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:05,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:05,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778705918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:05,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:05,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778705918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:06,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,034 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,069 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/a8d8502a9cf14e899f55b8067b3ebda5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a8d8502a9cf14e899f55b8067b3ebda5
2024-11-28T07:24:06,073 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into a8d8502a9cf14e899f55b8067b3ebda5(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T07:24:06,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2:
2024-11-28T07:24:06,073 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=13, startTime=1732778645584; duration=0sec
2024-11-28T07:24:06,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T07:24:06,073 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B
2024-11-28T07:24:06,109 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:06,114 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b550da4f62c54bffb58fe934095a2db2_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b550da4f62c54bffb58fe934095a2db2_a6b84436e6ee345d2d4f94cd524e48a2
2024-11-28T07:24:06,115 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31483993556d4a53b074686166756e58, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2]
2024-11-28T07:24:06,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31483993556d4a53b074686166756e58 is 175, key is test_row_0/A:col10/1732778645330/Put/seqid=0
2024-11-28T07:24:06,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778706123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:06,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778706123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:06,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742636_1812 (size=31255)
2024-11-28T07:24:06,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,187 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:06,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778706426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:06,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T07:24:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778706432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488
2024-11-28T07:24:06,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,494 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,557 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=438, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31483993556d4a53b074686166756e58
2024-11-28T07:24:06,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7b2b6028c2474eb5ad9827832b0952c5 is 50, key is test_row_0/B:col10/1732778645330/Put/seqid=0
2024-11-28T07:24:06,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742637_1813 (size=12301)
2024-11-28T07:24:06,647 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:06,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188
2024-11-28T07:24:06,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing
2024-11-28T07:24:06,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:06,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T07:24:06,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188
java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:06,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:06,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:06,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778706933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:06,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:06,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778706936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:06,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:06,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:06,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:06,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:06,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:06,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
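The RegionTooBusyException entries above ("Over memstore limit=512.0 K") are raised by HRegion.checkResources(), which rejects writes once a region's memstore grows past the flush size multiplied by the block multiplier. A minimal sketch, assuming only the standard hbase.hregion.memstore.* configuration keys (the 512 K limit in this run implies the test lowers the flush size far below the 128 MB default):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Standard keys; the values shown are the usual defaults, not what this test uses.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Above this size, puts are rejected with RegionTooBusyException until a flush drains the memstore.
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("Blocking memstore limit ~= " + blockingLimit + " bytes");
    }
}

Once a flush completes and the memstore drops back under that limit, the client's retried mutations (the RpcRetryingCallerImpl "Call exception, tries=..." entries) should succeed on a later attempt.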
2024-11-28T07:24:06,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:06,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:07,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7b2b6028c2474eb5ad9827832b0952c5 2024-11-28T07:24:07,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:07,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37066 deadline: 1732778707034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:07,036 DEBUG [Thread-2986 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:24:07,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:07,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37058 deadline: 1732778707039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:07,041 DEBUG [Thread-2984 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4189 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., hostname=592d8b721726,33143,1732778474488, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T07:24:07,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/44e7202183114abbb1d34b2378a112cc is 50, key is test_row_0/C:col10/1732778645330/Put/seqid=0 2024-11-28T07:24:07,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742638_1814 (size=12301) 2024-11-28T07:24:07,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/44e7202183114abbb1d34b2378a112cc 2024-11-28T07:24:07,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/31483993556d4a53b074686166756e58 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58 2024-11-28T07:24:07,109 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:07,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:07,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:07,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:07,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58, entries=150, sequenceid=438, filesize=30.5 K 2024-11-28T07:24:07,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/7b2b6028c2474eb5ad9827832b0952c5 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5 2024-11-28T07:24:07,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5, entries=150, sequenceid=438, filesize=12.0 K 2024-11-28T07:24:07,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/44e7202183114abbb1d34b2378a112cc as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc 2024-11-28T07:24:07,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc, entries=150, sequenceid=438, filesize=12.0 K 2024-11-28T07:24:07,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a6b84436e6ee345d2d4f94cd524e48a2 in 1468ms, sequenceid=438, compaction requested=false 2024-11-28T07:24:07,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:07,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:07,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:07,263 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:07,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:07,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:07,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ad925c42b0d243f9bb154bdce738200c_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778645811/Put/seqid=0 2024-11-28T07:24:07,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742639_1815 (size=12454) 2024-11-28T07:24:07,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,313 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ad925c42b0d243f9bb154bdce738200c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ad925c42b0d243f9bb154bdce738200c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:07,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/792d1e2ce78e44399f1a82fd978847ab, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:07,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/792d1e2ce78e44399f1a82fd978847ab is 175, key is test_row_0/A:col10/1732778645811/Put/seqid=0 2024-11-28T07:24:07,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742640_1816 (size=31255) 2024-11-28T07:24:07,329 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=465, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/792d1e2ce78e44399f1a82fd978847ab 2024-11-28T07:24:07,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/db2280d27ac74d8f805366d1c4f4bc87 is 50, key is test_row_0/B:col10/1732778645811/Put/seqid=0 2024-11-28T07:24:07,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742641_1817 (size=12301) 2024-11-28T07:24:07,373 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/db2280d27ac74d8f805366d1c4f4bc87 2024-11-28T07:24:07,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/e2dd21c5e3cb4a0b9be2616c5d192e80 is 50, key is test_row_0/C:col10/1732778645811/Put/seqid=0 2024-11-28T07:24:07,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742642_1818 (size=12301) 2024-11-28T07:24:07,437 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/e2dd21c5e3cb4a0b9be2616c5d192e80 2024-11-28T07:24:07,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/792d1e2ce78e44399f1a82fd978847ab as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab 2024-11-28T07:24:07,445 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab, entries=150, sequenceid=465, filesize=30.5 K 2024-11-28T07:24:07,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/db2280d27ac74d8f805366d1c4f4bc87 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87 2024-11-28T07:24:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,450 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87, entries=150, sequenceid=465, filesize=12.0 K 2024-11-28T07:24:07,450 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/e2dd21c5e3cb4a0b9be2616c5d192e80 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80 2024-11-28T07:24:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
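The repeated StoreFileTrackerFactory DEBUG lines above record the default tracker being instantiated for each store touched by these RPC handlers. A minimal sketch of how that implementation is selected, assuming the hbase.store.file-tracker.impl key used by the store file tracking feature in recent HBase releases (the key name is an assumption to verify against the running version):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerSelectionSketch {
    // Assumed to correspond to StoreFileTrackerFactory.TRACKER_IMPL.
    private static final String TRACKER_IMPL_KEY = "hbase.store.file-tracker.impl";

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "DEFAULT" yields DefaultStoreFileTracker, the implementation named in the log;
        // "FILE" selects the file-based tracker, and a fully qualified class name may also be given.
        conf.set(TRACKER_IMPL_KEY, "DEFAULT");
        System.out.println("Configured store file tracker: " + conf.get(TRACKER_IMPL_KEY));
    }
}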
2024-11-28T07:24:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,456 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80, entries=150, sequenceid=465, filesize=12.0 K 2024-11-28T07:24:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,457 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for a6b84436e6ee345d2d4f94cd524e48a2 in 194ms, sequenceid=465, compaction requested=true 2024-11-28T07:24:07,457 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:07,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:07,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188 2024-11-28T07:24:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=188 2024-11-28T07:24:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-11-28T07:24:07,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.8870 sec 2024-11-28T07:24:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, 
state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 3.8920 sec 2024-11-28T07:24:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG message from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=33143) from 2024-11-28T07:24:07,513 through 2024-11-28T07:24:07,600 ...]
2024-11-28T07:24:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated DEBUG entries, RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33143, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, 2024-11-28T07:24:07,655 through 07:24:07,673 ...]
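The entries above come from HBase's store file tracking layer: each time an RPC handler on the region server (port=33143) needs a store's file list during this flush, StoreFileTrackerFactory builds a tracker, and with no override configured it instantiates DefaultStoreFileTracker. A minimal sketch of how that choice can be read from configuration, assuming the standard HBase 2.5+ client classes and the "hbase.store.file-tracker.impl" property used by the store file tracking feature (the property name and its DEFAULT fallback are assumptions taken from that feature's documentation, not from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Load hbase-site.xml and the HBase defaults from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Assumed property: when it is unset, the factory falls back to the
    // DEFAULT tracker, i.e. the DefaultStoreFileTracker seen in the log above.
    String impl = conf.get("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker impl = " + impl);
  }
}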
[... repeated DEBUG entries, RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33143, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, 2024-11-28T07:24:07,674 through 07:24:07,680, interleaved with the master entries below ...]
2024-11-28T07:24:07,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187
2024-11-28T07:24:07,675 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed
2024-11-28T07:24:07,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T07:24:07,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees
2024-11-28T07:24:07,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189
2024-11-28T07:24:07,678 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T07:24:07,679 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T07:24:07,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
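The master entries above trace one flush lifecycle for TestAcidGuarantees: the client polls MasterRpcServices until pid=187 is done, a new flush request is then stored as FlushTableProcedure pid=189, which moves from FLUSH_TABLE_PREPARE to FLUSH_TABLE_FLUSH_REGIONS and spawns a FlushRegionProcedure subprocedure (pid=190). A minimal client-side sketch of what triggers and waits on such a procedure, assuming the standard HBase Admin API (connection settings and class names here are illustrative, not taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a master-side flush procedure and waits for it to
      // finish, which is the point where the client logs
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed".
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}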
[... repeated DEBUG entries, RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33143, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, 2024-11-28T07:24:07,680 through 07:24:07,759 ...]
2024-11-28T07:24:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-28T07:24:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T07:24:07,830 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:07,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190
2024-11-28T07:24:07,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:07,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2:
2024-11-28T07:24:07,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
2024-11-28T07:24:07,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190
2024-11-28T07:24:07,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=190
2024-11-28T07:24:07,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189
2024-11-28T07:24:07,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec
2024-11-28T07:24:07,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees in 157 msec
2024-11-28T07:24:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T07:24:07,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189
2024-11-28T07:24:07,980 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed
2024-11-28T07:24:07,982 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T07:24:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees
2024-11-28T07:24:07,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191
2024-11-28T07:24:07,984 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T07:24:07,985 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=191, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T07:24:07,985 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-28T07:24:08,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:08,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:08,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:08,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c1b97235bbaf41328ae6019255fb2516_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:08,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742643_1819 (size=22618) 2024-11-28T07:24:08,097 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:08,100 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c1b97235bbaf41328ae6019255fb2516_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c1b97235bbaf41328ae6019255fb2516_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:08,102 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/b3d86e104a44476abc6a3533ae6ff069, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:08,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/b3d86e104a44476abc6a3533ae6ff069 is 175, key is test_row_0/A:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778708126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778708130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
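The RegionTooBusyException entries above record client Mutate calls being rejected while the region's memstore is over its 512.0 K blocking limit; the client library normally retries these internally. Below is a minimal, hypothetical sketch of what an explicit bounded retry on the caller's side could look like. The table, row, family and qualifier names are taken from the log; the attempt count, backoff and value are illustrative only, and depending on client retry settings the exception may arrive wrapped in a RetriesExhaustedException rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BoundedRetryPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      int maxAttempts = 5;      // illustrative bound, not taken from the test
      long backoffMs = 200L;    // illustrative backoff, not taken from the test
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);       // rejected server-side while the memstore is over its limit
          break;                // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= maxAttempts) {
            throw e;            // give up after the bound
          }
          Thread.sleep(backoffMs * attempt); // simple linear backoff before retrying
        }
      }
    }
  }
}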
2024-11-28T07:24:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:08,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742644_1820 (size=66023) 2024-11-28T07:24:08,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
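Procedure pid=192 above is a region-flush callable dispatched by the master; the region server declines it ("NOT flushing ... as already flushing") because MemStoreFlusher.0 is still writing out the current flush, so the callable fails with "Unable to complete flush" and the master keeps re-dispatching it. At the client API level such a flush is simply an Admin request. The following sketch shows only the request side, assuming a standard client Configuration, and does not reproduce the procedure retry behaviour visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks for all regions of the table to be flushed; in this build the request
      // appears to be driven through master procedures like pid=191/192 above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}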
2024-11-28T07:24:08,143 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=477, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/b3d86e104a44476abc6a3533ae6ff069 2024-11-28T07:24:08,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a25ba7351f6478bb8fb261a8847753e is 50, key is test_row_0/B:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742645_1821 (size=12301) 2024-11-28T07:24:08,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a25ba7351f6478bb8fb261a8847753e 2024-11-28T07:24:08,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/f5331fc1abcd4d09acfe7787b6473ef0 is 50, key is test_row_0/C:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778708232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778708236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742646_1822 (size=12301) 2024-11-28T07:24:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:08,294 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37038 deadline: 1732778708440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T07:24:08,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:08,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33143 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37056 deadline: 1732778708442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 2024-11-28T07:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
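The "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources(), which in stock HBase blocks writes once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values this test actually uses are not visible in this excerpt; the sketch below is one hypothetical combination that yields the 512 K figure seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
    // matching the "Over memstore limit=512.0 K" messages in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024L * 1024L);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit bytes = " + flushSize * multiplier); // prints 524288
  }
}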
2024-11-28T07:24:08,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:08,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:08,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/f5331fc1abcd4d09acfe7787b6473ef0 2024-11-28T07:24:08,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/b3d86e104a44476abc6a3533ae6ff069 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069 2024-11-28T07:24:08,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069, entries=350, sequenceid=477, filesize=64.5 K 2024-11-28T07:24:08,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1a25ba7351f6478bb8fb261a8847753e as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e 2024-11-28T07:24:08,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e, entries=150, sequenceid=477, filesize=12.0 K 2024-11-28T07:24:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/f5331fc1abcd4d09acfe7787b6473ef0 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0 2024-11-28T07:24:08,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0, entries=150, sequenceid=477, filesize=12.0 K 2024-11-28T07:24:08,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for a6b84436e6ee345d2d4f94cd524e48a2 in 650ms, sequenceid=477, compaction requested=true 2024-11-28T07:24:08,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:08,694 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a6b84436e6ee345d2d4f94cd524e48a2:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T07:24:08,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T07:24:08,694 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:08,695 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 160810 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:08,695 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/A is initiating minor compaction (all files) 2024-11-28T07:24:08,695 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/A in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,696 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=157.0 K 2024-11-28T07:24:08,696 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:08,696 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. files: [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069] 2024-11-28T07:24:08,696 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dfa4d2fd3484e38a67da2389d6038af, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:08,696 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31483993556d4a53b074686166756e58, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732778645330 2024-11-28T07:24:08,697 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:08,697 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting 792d1e2ce78e44399f1a82fd978847ab, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732778645809 2024-11-28T07:24:08,697 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/B is initiating minor compaction (all files) 2024-11-28T07:24:08,697 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/B in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:08,697 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a8d8502a9cf14e899f55b8067b3ebda5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=49.0 K 2024-11-28T07:24:08,697 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3d86e104a44476abc6a3533ae6ff069, keycount=350, bloomtype=ROW, size=64.5 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732778647978 2024-11-28T07:24:08,698 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting a8d8502a9cf14e899f55b8067b3ebda5, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:08,698 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b2b6028c2474eb5ad9827832b0952c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732778645330 2024-11-28T07:24:08,699 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting db2280d27ac74d8f805366d1c4f4bc87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732778645809 2024-11-28T07:24:08,700 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a25ba7351f6478bb8fb261a8847753e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732778647978 2024-11-28T07:24:08,719 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:08,723 DEBUG [Thread-2999 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:56318 2024-11-28T07:24:08,723 DEBUG [Thread-2999 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,724 DEBUG [Thread-3001 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:56318 2024-11-28T07:24:08,724 DEBUG [Thread-3001 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,726 DEBUG [Thread-2995 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:56318 2024-11-28T07:24:08,726 DEBUG [Thread-2995 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,727 DEBUG [Thread-2997 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:56318 2024-11-28T07:24:08,727 DEBUG [Thread-2997 
{}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,728 DEBUG [Thread-2993 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:56318 2024-11-28T07:24:08,728 DEBUG [Thread-2993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,729 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411287025f097aa7f4de8b075649accff65f0_a6b84436e6ee345d2d4f94cd524e48a2 store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:08,730 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#B#compaction#694 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:08,731 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411287025f097aa7f4de8b075649accff65f0_a6b84436e6ee345d2d4f94cd524e48a2, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:08,731 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1338b29674644b2da30ac7084c9e2d58 is 50, key is test_row_0/B:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,731 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287025f097aa7f4de8b075649accff65f0_a6b84436e6ee345d2d4f94cd524e48a2 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:08,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742647_1823 (size=13459) 2024-11-28T07:24:08,741 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/1338b29674644b2da30ac7084c9e2d58 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1338b29674644b2da30ac7084c9e2d58 2024-11-28T07:24:08,745 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/B of a6b84436e6ee345d2d4f94cd524e48a2 into 1338b29674644b2da30ac7084c9e2d58(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:24:08,745 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:08,745 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/B, priority=12, startTime=1732778648694; duration=0sec 2024-11-28T07:24:08,745 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T07:24:08,745 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:B 2024-11-28T07:24:08,745 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T07:24:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33143 {}] regionserver.HRegion(8581): Flush requested on a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:08,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T07:24:08,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:08,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:08,747 DEBUG [Thread-2982 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:56318 2024-11-28T07:24:08,747 DEBUG [Thread-2982 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,749 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T07:24:08,749 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1540): a6b84436e6ee345d2d4f94cd524e48a2/C is initiating minor compaction (all files) 2024-11-28T07:24:08,749 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a6b84436e6ee345d2d4f94cd524e48a2/C in TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:08,750 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0a80c7436ae741028c0c3b0a70269782, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0] into tmpdir=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp, totalSize=49.0 K 2024-11-28T07:24:08,751 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a80c7436ae741028c0c3b0a70269782, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1732778644710 2024-11-28T07:24:08,752 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting 44e7202183114abbb1d34b2378a112cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732778645330 2024-11-28T07:24:08,753 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting e2dd21c5e3cb4a0b9be2616c5d192e80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=465, earliestPutTs=1732778645809 2024-11-28T07:24:08,753 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] compactions.Compactor(224): Compacting f5331fc1abcd4d09acfe7787b6473ef0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=477, earliestPutTs=1732778647978 2024-11-28T07:24:08,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742648_1824 (size=4469) 2024-11-28T07:24:08,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,754 DEBUG [Thread-2990 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:56318 2024-11-28T07:24:08,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,754 DEBUG [Thread-2990 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:08,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:08,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,755 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#A#compaction#693 average throughput is 0.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:08,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:08,757 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/7c047ade1c1e40059774fa0ab3096e45 is 175, key is test_row_0/A:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112843b9686f86c44bf6919d6391bd309e3c_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778648124/Put/seqid=0 2024-11-28T07:24:08,764 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a6b84436e6ee345d2d4f94cd524e48a2#C#compaction#696 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T07:24:08,765 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/01dbd610ffb046e2a2344386dfdefe0b is 50, key is test_row_0/C:col10/1732778647978/Put/seqid=0 2024-11-28T07:24:08,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742649_1825 (size=32413) 2024-11-28T07:24:08,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742650_1826 (size=12454) 2024-11-28T07:24:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742651_1827 (size=13459) 2024-11-28T07:24:08,782 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/7c047ade1c1e40059774fa0ab3096e45 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/7c047ade1c1e40059774fa0ab3096e45 2024-11-28T07:24:08,787 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/01dbd610ffb046e2a2344386dfdefe0b as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/01dbd610ffb046e2a2344386dfdefe0b 2024-11-28T07:24:08,787 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/A of a6b84436e6ee345d2d4f94cd524e48a2 into 7c047ade1c1e40059774fa0ab3096e45(size=31.7 K), total size for store is 31.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T07:24:08,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:08,787 INFO [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/A, priority=12, startTime=1732778648693; duration=0sec 2024-11-28T07:24:08,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:08,787 DEBUG [RS:0;592d8b721726:33143-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:A 2024-11-28T07:24:08,791 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in a6b84436e6ee345d2d4f94cd524e48a2/C of a6b84436e6ee345d2d4f94cd524e48a2 into 01dbd610ffb046e2a2344386dfdefe0b(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T07:24:08,791 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:08,791 INFO [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2., storeName=a6b84436e6ee345d2d4f94cd524e48a2/C, priority=12, startTime=1732778648694; duration=0sec 2024-11-28T07:24:08,791 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T07:24:08,791 DEBUG [RS:0;592d8b721726:33143-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a6b84436e6ee345d2d4f94cd524e48a2:C 2024-11-28T07:24:08,906 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:08,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:08,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:08,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:09,178 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:09,184 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112843b9686f86c44bf6919d6391bd309e3c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112843b9686f86c44bf6919d6391bd309e3c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:09,185 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/5eda9d30830e4c74a759aadfa91233c2, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:09,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/5eda9d30830e4c74a759aadfa91233c2 is 175, key is test_row_0/A:col10/1732778648124/Put/seqid=0 2024-11-28T07:24:09,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742652_1828 (size=31255) 2024-11-28T07:24:09,212 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T07:24:09,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:09,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,518 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,590 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=504, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/5eda9d30830e4c74a759aadfa91233c2 2024-11-28T07:24:09,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/243b78e0c6f246efb34854140a24469d is 50, key is test_row_0/B:col10/1732778648124/Put/seqid=0 2024-11-28T07:24:09,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742653_1829 (size=12301) 2024-11-28T07:24:09,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:09,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,823 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:09,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:09,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:09,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/243b78e0c6f246efb34854140a24469d 2024-11-28T07:24:10,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7fd5f80b3b3745a6a4dcffe09c6649ac is 50, key is test_row_0/C:col10/1732778648124/Put/seqid=0 2024-11-28T07:24:10,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742654_1830 (size=12301) 2024-11-28T07:24:10,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:10,130 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:10,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
as already flushing 2024-11-28T07:24:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:10,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-1 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,282 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:10,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192 2024-11-28T07:24:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. as already flushing 2024-11-28T07:24:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:10,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] handler.RSProcedureHandler(58): pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-2 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=192 java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4114): Remote procedure failed, pid=192 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T07:24:10,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=504 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7fd5f80b3b3745a6a4dcffe09c6649ac 2024-11-28T07:24:10,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/5eda9d30830e4c74a759aadfa91233c2 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/5eda9d30830e4c74a759aadfa91233c2 2024-11-28T07:24:10,425 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/5eda9d30830e4c74a759aadfa91233c2, entries=150, sequenceid=504, filesize=30.5 K 2024-11-28T07:24:10,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/243b78e0c6f246efb34854140a24469d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/243b78e0c6f246efb34854140a24469d 2024-11-28T07:24:10,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/243b78e0c6f246efb34854140a24469d, entries=150, 
sequenceid=504, filesize=12.0 K
2024-11-28T07:24:10,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/7fd5f80b3b3745a6a4dcffe09c6649ac as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7fd5f80b3b3745a6a4dcffe09c6649ac
2024-11-28T07:24:10,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7fd5f80b3b3745a6a4dcffe09c6649ac, entries=150, sequenceid=504, filesize=12.0 K
2024-11-28T07:24:10,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=6.71 KB/6870 for a6b84436e6ee345d2d4f94cd524e48a2 in 1688ms, sequenceid=504, compaction requested=false
2024-11-28T07:24:10,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2:
2024-11-28T07:24:10,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488
2024-11-28T07:24:10,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33143 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=192
2024-11-28T07:24:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.
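
The Committing ... as ... / Added ... pairs just above (HRegionFileSystem(442) and HStore$StoreFlusherImpl(1989)) show the flush commit step: each store file is first written under the region's .tmp directory and only then moved into the column family directory, so readers never observe a partially written file. The snippet below is a minimal local-filesystem sketch of that write-to-tmp-then-rename pattern; it uses java.nio.file rather than the real HRegionFileSystem/HDFS code, and the directory layout and file name are illustrative only.

import java.io.IOException;
import java.nio.file.*;

// Illustrative model of "write to .tmp, then commit by rename", the pattern the
// MemStoreFlusher records above follow. Paths are made up; the real code targets HDFS.
public class TmpThenCommit {
    public static Path commitFlushFile(Path regionDir, String family, Path tmpFile) throws IOException {
        Path familyDir = regionDir.resolve(family);
        Files.createDirectories(familyDir);
        Path target = familyDir.resolve(tmpFile.getFileName());
        // Atomic move: concurrent readers see either the old file set or the new one,
        // never a half-written store file.
        return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path regionDir = Files.createTempDirectory("region");
        Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp").resolve("A"));
        Path tmpFile = Files.writeString(tmpDir.resolve("example-storefile"), "hfile bytes");
        System.out.println("committed to " + commitFlushFile(regionDir, "A", tmpFile));
    }
}
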
2024-11-28T07:24:10,435 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-28T07:24:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:10,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:10,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:10,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:10,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:10,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:10,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b5bb6dfe6139438b8c20d13c6344981f_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_0/A:col10/1732778648751/Put/seqid=0 2024-11-28T07:24:10,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742655_1831 (size=7374) 2024-11-28T07:24:10,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:10,851 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b5bb6dfe6139438b8c20d13c6344981f_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5bb6dfe6139438b8c20d13c6344981f_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:10,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eb27574b45a5456b8173c53940013b0d, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:10,852 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eb27574b45a5456b8173c53940013b0d is 175, key is test_row_0/A:col10/1732778648751/Put/seqid=0 2024-11-28T07:24:10,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742656_1832 (size=13865) 2024-11-28T07:24:10,856 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=510, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eb27574b45a5456b8173c53940013b0d 2024-11-28T07:24:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b77b4d160c4546a7b60b6b8ba5bcab99 is 50, key is test_row_0/B:col10/1732778648751/Put/seqid=0 2024-11-28T07:24:10,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742657_1833 (size=7415) 2024-11-28T07:24:11,047 DEBUG [Thread-2986 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:56318 2024-11-28T07:24:11,047 DEBUG [Thread-2986 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:11,052 DEBUG [Thread-2984 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:56318 2024-11-28T07:24:11,052 DEBUG [Thread-2984 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:11,278 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b77b4d160c4546a7b60b6b8ba5bcab99 2024-11-28T07:24:11,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/bbf013bc0fb74e1297d5876aafa621a4 is 50, key is test_row_0/C:col10/1732778648751/Put/seqid=0 2024-11-28T07:24:11,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742658_1834 (size=7415) 2024-11-28T07:24:11,287 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=510 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/bbf013bc0fb74e1297d5876aafa621a4 2024-11-28T07:24:11,290 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/eb27574b45a5456b8173c53940013b0d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eb27574b45a5456b8173c53940013b0d 2024-11-28T07:24:11,293 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eb27574b45a5456b8173c53940013b0d, entries=50, sequenceid=510, filesize=13.5 K 2024-11-28T07:24:11,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/b77b4d160c4546a7b60b6b8ba5bcab99 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b77b4d160c4546a7b60b6b8ba5bcab99 2024-11-28T07:24:11,297 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b77b4d160c4546a7b60b6b8ba5bcab99, entries=50, sequenceid=510, filesize=7.2 K 2024-11-28T07:24:11,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/bbf013bc0fb74e1297d5876aafa621a4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/bbf013bc0fb74e1297d5876aafa621a4 2024-11-28T07:24:11,300 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/bbf013bc0fb74e1297d5876aafa621a4, entries=50, sequenceid=510, filesize=7.2 K 2024-11-28T07:24:11,301 INFO [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=13.42 KB/13740 for a6b84436e6ee345d2d4f94cd524e48a2 in 865ms, sequenceid=510, compaction requested=true 2024-11-28T07:24:11,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.HRegion(2538): Flush status journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:11,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:11,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/592d8b721726:0-0 {event_type=RS_FLUSH_REGIONS, pid=192}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=192 2024-11-28T07:24:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster(4106): Remote procedure done, pid=192 2024-11-28T07:24:11,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-11-28T07:24:11,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3170 sec 2024-11-28T07:24:11,304 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=191, table=TestAcidGuarantees in 3.3210 sec 2024-11-28T07:24:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-11-28T07:24:12,089 INFO [Thread-2992 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 191 completed 2024-11-28T07:24:12,302 DEBUG [Thread-2988 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:56318 2024-11-28T07:24:12,303 DEBUG [Thread-2988 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 119 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 117 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3817 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3879 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3737 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3818 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3889 2024-11-28T07:24:12,303 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T07:24:12,303 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T07:24:12,303 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:56318 2024-11-28T07:24:12,303 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:12,304 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T07:24:12,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T07:24:12,305 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=193, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:12,306 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778652306"}]},"ts":"1732778652306"} 2024-11-28T07:24:12,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T07:24:12,309 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T07:24:12,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=194, ppid=193, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T07:24:12,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, UNASSIGN}] 2024-11-28T07:24:12,311 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=195, ppid=194, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, UNASSIGN 2024-11-28T07:24:12,311 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=195 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=CLOSING, regionLocation=592d8b721726,33143,1732778474488 2024-11-28T07:24:12,312 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T07:24:12,312 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE; CloseRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488}] 2024-11-28T07:24:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:12,463 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 592d8b721726,33143,1732778474488 2024-11-28T07:24:12,464 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing a6b84436e6ee345d2d4f94cd524e48a2, disabling compactions & flushes 2024-11-28T07:24:12,464 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
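The FLUSH completion (procId 191) and the disable request (pid=193) recorded above are driven from the client side through the standard HBase Admin API. Below is a minimal sketch of that client-side counterpart, assuming an HBase 2.x client with an hbase-site.xml for this cluster on the classpath; the class name is illustrative, and only the table name TestAcidGuarantees is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch; not part of the test harness above.
public class FlushAndDisableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(table);        // master runs a flush procedure for the table, as with pid=191 above
      admin.disableTable(table); // blocks until the DisableTableProcedure (pid=193 above) completes
    }
  }
}

Both calls wait for the corresponding master procedure to finish, which is why the log shows the admin side repeatedly asking whether pid=191 and pid=193 are done before reporting the operations complete.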
2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. after waiting 0 ms 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 2024-11-28T07:24:12,464 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(2837): Flushing a6b84436e6ee345d2d4f94cd524e48a2 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=A 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=B 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a6b84436e6ee345d2d4f94cd524e48a2, store=C 2024-11-28T07:24:12,464 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T07:24:12,468 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411281569f36d72554d3686f0db0031119aa8_a6b84436e6ee345d2d4f94cd524e48a2 is 50, key is test_row_1/A:col10/1732778652302/Put/seqid=0 2024-11-28T07:24:12,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742659_1835 (size=9914) 2024-11-28T07:24:12,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:12,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
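The repeated MasterRpcServices entries ("Checking to see if procedure is done pid=193") interleaved with the region close above are the admin client polling the master for the outcome of the disable procedure. A sketch of the asynchronous form of the same call, under the same HBase 2.x client assumptions as the previous example (names other than TestAcidGuarantees are illustrative):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch; not part of the test harness above.
public class AsyncDisableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the DisableTableProcedure and returns a Future; the Future completes once the
      // master answers that the stored procedure id is done.
      Future<Void> pending = admin.disableTableAsync(TableName.valueOf("TestAcidGuarantees"));
      pending.get(5, TimeUnit.MINUTES); // give up if the procedure has not finished within 5 minutes
    }
  }
}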
2024-11-28T07:24:12,872 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T07:24:12,874 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411281569f36d72554d3686f0db0031119aa8_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281569f36d72554d3686f0db0031119aa8_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:12,875 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9af2d81cb98d4351b2cc3362115b5005, store: [table=TestAcidGuarantees family=A region=a6b84436e6ee345d2d4f94cd524e48a2] 2024-11-28T07:24:12,875 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9af2d81cb98d4351b2cc3362115b5005 is 175, key is test_row_1/A:col10/1732778652302/Put/seqid=0 2024-11-28T07:24:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742660_1836 (size=22561) 2024-11-28T07:24:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:13,278 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=516, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9af2d81cb98d4351b2cc3362115b5005 2024-11-28T07:24:13,283 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/36b469bca6ba4e4fb85af9ed8b052f95 is 50, key is test_row_1/B:col10/1732778652302/Put/seqid=0 2024-11-28T07:24:13,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742661_1837 (size=9857) 2024-11-28T07:24:13,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:13,688 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=516 (bloomFilter=true), 
to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/36b469bca6ba4e4fb85af9ed8b052f95 2024-11-28T07:24:13,692 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/17cb568ad7704351b3eed48e523b07fd is 50, key is test_row_1/C:col10/1732778652302/Put/seqid=0 2024-11-28T07:24:13,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742662_1838 (size=9857) 2024-11-28T07:24:14,096 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/17cb568ad7704351b3eed48e523b07fd 2024-11-28T07:24:14,099 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/A/9af2d81cb98d4351b2cc3362115b5005 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9af2d81cb98d4351b2cc3362115b5005 2024-11-28T07:24:14,102 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9af2d81cb98d4351b2cc3362115b5005, entries=100, sequenceid=516, filesize=22.0 K 2024-11-28T07:24:14,103 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/B/36b469bca6ba4e4fb85af9ed8b052f95 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/36b469bca6ba4e4fb85af9ed8b052f95 2024-11-28T07:24:14,106 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/36b469bca6ba4e4fb85af9ed8b052f95, entries=100, sequenceid=516, filesize=9.6 K 2024-11-28T07:24:14,106 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/.tmp/C/17cb568ad7704351b3eed48e523b07fd as 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/17cb568ad7704351b3eed48e523b07fd 2024-11-28T07:24:14,109 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/17cb568ad7704351b3eed48e523b07fd, entries=100, sequenceid=516, filesize=9.6 K 2024-11-28T07:24:14,110 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for a6b84436e6ee345d2d4f94cd524e48a2 in 1646ms, sequenceid=516, compaction requested=true 2024-11-28T07:24:14,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069] to archive 2024-11-28T07:24:14,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:24:14,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31e636694cee417690f25a3be92cc005 2024-11-28T07:24:14,114 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43f482cd42814b26be1ef1f20ea011f7 2024-11-28T07:24:14,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eacd89be5faf4aa29c2448d07283620c 2024-11-28T07:24:14,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d9742d5bb5cc4b8aab1c86378fe223ed 2024-11-28T07:24:14,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/64ad0040541747369e4b3134b1f2f0a5 2024-11-28T07:24:14,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6006a79cc06b443ab11c5632b01d2db6 2024-11-28T07:24:14,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4f2c1374cd62406b8f00928490e11b01 2024-11-28T07:24:14,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/bd1a53a7906349f6b47c6684e37b9bd1 2024-11-28T07:24:14,120 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/34b75f548b25474abdf8ade228a72e7f 2024-11-28T07:24:14,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c7dd02a4ec4a4948aeaa2b7e62fbdd19 2024-11-28T07:24:14,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/057490e761ba49f09419748448d36857 2024-11-28T07:24:14,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/44c08b8c95924426a84f38be67de129e 2024-11-28T07:24:14,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/90ec350c15bf498aa504bb352a5c4b19 2024-11-28T07:24:14,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/db6104afa82b4e6e83c2315eeddeaf4a 2024-11-28T07:24:14,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/3c2235a8f8444bd09aa526d1facdba98 2024-11-28T07:24:14,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/c9491c9a1af14ce7aa0aa3358305e108 2024-11-28T07:24:14,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/d409b89a61bc482f8ccdc17d0ef4ae95 2024-11-28T07:24:14,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/10f9a06c26354072b56812dce1c7f014 2024-11-28T07:24:14,129 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/8da4adf4b0c6490aa3b44d7b9e822d71 2024-11-28T07:24:14,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/32f13b2b62de4009b88e01f299e95e4d 2024-11-28T07:24:14,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/f1de003d7a5d452ea26ae06ce98c38f6 2024-11-28T07:24:14,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/43513a05b306413bb48ec9452237c254 2024-11-28T07:24:14,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/872df396b9914bee88286f3463bb0b41 2024-11-28T07:24:14,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4009fd9246844037b234063d865b14ac 2024-11-28T07:24:14,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9df510064ce4483fa0864a7eee3597eb 2024-11-28T07:24:14,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/0ce6df7280b1438fa45a17267e2d3d18 2024-11-28T07:24:14,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/df135a9870e7492b93948ddbe05e2243 2024-11-28T07:24:14,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/67b1b0b9788743239f660177bfb99e0f 2024-11-28T07:24:14,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/1df5a2381be042889754435c5833f463 2024-11-28T07:24:14,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/6dfa4d2fd3484e38a67da2389d6038af 2024-11-28T07:24:14,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/4fd9740d68e54e43a776268b8727dfe2 2024-11-28T07:24:14,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/31483993556d4a53b074686166756e58 2024-11-28T07:24:14,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/792d1e2ce78e44399f1a82fd978847ab 2024-11-28T07:24:14,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/b3d86e104a44476abc6a3533ae6ff069 2024-11-28T07:24:14,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/c063a972e3a74962b751d7d38aea67a8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/344e11f3a4ca41af859cf5795cbb4e57, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/12a90c69c5614496992d29380fa9a18b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/09886e2624344ab696e907935b518b0d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5a0b3d516a9c4779909cfeb252d52d27, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3e31027b98024d4fb05452b8fc9e1446, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5fe47464a7e34cf098d9645c6c8b8bd8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5048ecfb546e4b20bcd8c4e8a26cde25, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a8d8502a9cf14e899f55b8067b3ebda5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e] to archive 2024-11-28T07:24:14,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T07:24:14,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a5ae23e52aa54e08b4553fa30d6de102 2024-11-28T07:24:14,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dd0b4d43f859483aab4235a00a894462 2024-11-28T07:24:14,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/c063a972e3a74962b751d7d38aea67a8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/c063a972e3a74962b751d7d38aea67a8 2024-11-28T07:24:14,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7fc7d0b05274446e8a73607669e5e88a 2024-11-28T07:24:14,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a07ad05677146819580e3d4c8f10478 2024-11-28T07:24:14,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/344e11f3a4ca41af859cf5795cbb4e57 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/344e11f3a4ca41af859cf5795cbb4e57 2024-11-28T07:24:14,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/839b524f0c7b4165a92ea4abe67915ec 2024-11-28T07:24:14,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7ca2426ba1a54cc49411de53e415d4f9 2024-11-28T07:24:14,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/12a90c69c5614496992d29380fa9a18b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/12a90c69c5614496992d29380fa9a18b 2024-11-28T07:24:14,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b34ea5f6e6d24a67a66c0605ada397bf 2024-11-28T07:24:14,157 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/11370cb7d260403b9d06b7763bd96d31 2024-11-28T07:24:14,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/9753c1f4a5554a6da28957c3fbe4e7bd 2024-11-28T07:24:14,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/09886e2624344ab696e907935b518b0d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/09886e2624344ab696e907935b518b0d 2024-11-28T07:24:14,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3ce5364970e94a0f91684514cc4a6cc1 2024-11-28T07:24:14,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b84744d575e34e50b6d97977a62ad53b 2024-11-28T07:24:14,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5a0b3d516a9c4779909cfeb252d52d27 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5a0b3d516a9c4779909cfeb252d52d27 2024-11-28T07:24:14,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/165afd09bea64906b50c79020179d156 2024-11-28T07:24:14,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/8a41336e0e2f45fab1af54187e0a178f 2024-11-28T07:24:14,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/2ee2c2f6a768478ba1b075b950e629ca 2024-11-28T07:24:14,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3e31027b98024d4fb05452b8fc9e1446 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/3e31027b98024d4fb05452b8fc9e1446 2024-11-28T07:24:14,165 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/86bdc49f8b634c44ae5658dc4e867813 2024-11-28T07:24:14,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/58109c171be645a0a28820144f8ac83f 2024-11-28T07:24:14,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/f7ec0545928f43b9bcc22335c0f66e12 2024-11-28T07:24:14,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5fe47464a7e34cf098d9645c6c8b8bd8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5fe47464a7e34cf098d9645c6c8b8bd8 2024-11-28T07:24:14,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/154badcf202a4eb7aec5d89e10c6e7b4 2024-11-28T07:24:14,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1c1e20cb27c04c6982abc73128c8f9b5 2024-11-28T07:24:14,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5048ecfb546e4b20bcd8c4e8a26cde25 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/5048ecfb546e4b20bcd8c4e8a26cde25 2024-11-28T07:24:14,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/dca13fd1ea37441c850279ca513e6df3 2024-11-28T07:24:14,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/bab8d7edd571448d8268021472dbdf5e 2024-11-28T07:24:14,172 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a8d8502a9cf14e899f55b8067b3ebda5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/a8d8502a9cf14e899f55b8067b3ebda5 2024-11-28T07:24:14,173 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/596179225c6e46bba1f0c24bcb78fdc8 2024-11-28T07:24:14,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/7b2b6028c2474eb5ad9827832b0952c5 2024-11-28T07:24:14,174 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/db2280d27ac74d8f805366d1c4f4bc87 2024-11-28T07:24:14,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1a25ba7351f6478bb8fb261a8847753e 2024-11-28T07:24:14,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6fed1a396a3548d2be7988de0a1c3c66, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/94a8b8f9c71e4e7d901c9c8139001da5, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/15a98f745ba04baabe9dcc02bb8616ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/3e37c3fe462f4da48cdb1653b12dad8b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e92883d1494a4fa989ab9691f29f47b7, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44eda0d912d84309b93a7e9a7ac67194, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/59b8357826164ff7948b0b8d5244dbbf, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/d53994383a08405689d049c145c19296, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0a80c7436ae741028c0c3b0a70269782, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0] to archive 2024-11-28T07:24:14,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T07:24:14,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b3330bcc900049f682ba3ab9d5bf4cd1 2024-11-28T07:24:14,179 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6a7cef1d195344bdbd75155b74c82b29 2024-11-28T07:24:14,180 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6fed1a396a3548d2be7988de0a1c3c66 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/6fed1a396a3548d2be7988de0a1c3c66 2024-11-28T07:24:14,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0f920456ccc343f3a3595e7b5c87498c 2024-11-28T07:24:14,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/de88311ff07c4380b86ad751f872cd45 2024-11-28T07:24:14,183 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/94a8b8f9c71e4e7d901c9c8139001da5 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/94a8b8f9c71e4e7d901c9c8139001da5 2024-11-28T07:24:14,184 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5ef66c56ef234f2b9bcfbe57d228f1ab 2024-11-28T07:24:14,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/39044e5f22e344ce9034b38112b68b25 2024-11-28T07:24:14,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/15a98f745ba04baabe9dcc02bb8616ba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/15a98f745ba04baabe9dcc02bb8616ba 2024-11-28T07:24:14,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/b0a1fec7a6c74c7caa1c8f84ff87d423 2024-11-28T07:24:14,188 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/8e1baca52ed2401e8a07fe6c2d325f3b 2024-11-28T07:24:14,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/5dce0991e33542149d0a806b745cfe0f 2024-11-28T07:24:14,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/3e37c3fe462f4da48cdb1653b12dad8b to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/3e37c3fe462f4da48cdb1653b12dad8b 2024-11-28T07:24:14,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/dbecdeb4300a46a2973c44a0a2c2b0ba 2024-11-28T07:24:14,191 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fae893d3228c47f39cb83f596f619269 2024-11-28T07:24:14,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e92883d1494a4fa989ab9691f29f47b7 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e92883d1494a4fa989ab9691f29f47b7 2024-11-28T07:24:14,193 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/06a731c7891d43feaa96780d6eebb884 2024-11-28T07:24:14,194 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/95f4c981a83b4225aa97d4cc3deaf6a6 2024-11-28T07:24:14,195 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c858e8293e2745889b7cf5fd8ca0ab9d 2024-11-28T07:24:14,196 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44eda0d912d84309b93a7e9a7ac67194 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44eda0d912d84309b93a7e9a7ac67194 2024-11-28T07:24:14,197 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7034c6719cab49ce8523123e3c5a02c1 2024-11-28T07:24:14,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/c9de82a550a4444d80d5312e2bc4889e 2024-11-28T07:24:14,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7139f8842a2a43789f2effc52803ca47 2024-11-28T07:24:14,199 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/59b8357826164ff7948b0b8d5244dbbf to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/59b8357826164ff7948b0b8d5244dbbf 2024-11-28T07:24:14,200 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/43e89f21c7bf4094ba7abe757088911b 2024-11-28T07:24:14,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/4b1c1857537c4e8cbc774ee5b113f051 2024-11-28T07:24:14,202 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/d53994383a08405689d049c145c19296 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/d53994383a08405689d049c145c19296 2024-11-28T07:24:14,203 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fbf8b37cd2a0424da7e654095b603f98 2024-11-28T07:24:14,204 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/fa2743a7c90044f898e6f5955e6236b1 2024-11-28T07:24:14,205 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0a80c7436ae741028c0c3b0a70269782 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/0a80c7436ae741028c0c3b0a70269782 2024-11-28T07:24:14,206 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/27b0cf419d424311b41f1f2baee76fb4 2024-11-28T07:24:14,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/44e7202183114abbb1d34b2378a112cc 2024-11-28T07:24:14,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/e2dd21c5e3cb4a0b9be2616c5d192e80 2024-11-28T07:24:14,208 DEBUG [StoreCloser-TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/f5331fc1abcd4d09acfe7787b6473ef0 2024-11-28T07:24:14,212 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits/519.seqid, newMaxSeqId=519, maxSeqId=4 2024-11-28T07:24:14,212 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2. 
2024-11-28T07:24:14,212 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for a6b84436e6ee345d2d4f94cd524e48a2: 2024-11-28T07:24:14,213 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,214 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=195 updating hbase:meta row=a6b84436e6ee345d2d4f94cd524e48a2, regionState=CLOSED 2024-11-28T07:24:14,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=195 2024-11-28T07:24:14,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=195, state=SUCCESS; CloseRegionProcedure a6b84436e6ee345d2d4f94cd524e48a2, server=592d8b721726,33143,1732778474488 in 1.9030 sec 2024-11-28T07:24:14,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=194 2024-11-28T07:24:14,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=194, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a6b84436e6ee345d2d4f94cd524e48a2, UNASSIGN in 1.9060 sec 2024-11-28T07:24:14,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=193 2024-11-28T07:24:14,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=193, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9070 sec 2024-11-28T07:24:14,219 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732778654219"}]},"ts":"1732778654219"} 2024-11-28T07:24:14,220 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T07:24:14,222 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T07:24:14,224 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9190 sec 2024-11-28T07:24:14,238 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T07:24:14,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=193 2024-11-28T07:24:14,410 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 193 completed 2024-11-28T07:24:14,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T07:24:14,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,411 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,412 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-11-28T07:24:14,412 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,413 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,415 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C, FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits] 2024-11-28T07:24:14,418 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/5eda9d30830e4c74a759aadfa91233c2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/5eda9d30830e4c74a759aadfa91233c2 2024-11-28T07:24:14,419 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/7c047ade1c1e40059774fa0ab3096e45 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/7c047ade1c1e40059774fa0ab3096e45 2024-11-28T07:24:14,420 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9af2d81cb98d4351b2cc3362115b5005 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/9af2d81cb98d4351b2cc3362115b5005 2024-11-28T07:24:14,421 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eb27574b45a5456b8173c53940013b0d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/A/eb27574b45a5456b8173c53940013b0d 2024-11-28T07:24:14,422 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1338b29674644b2da30ac7084c9e2d58 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/1338b29674644b2da30ac7084c9e2d58 2024-11-28T07:24:14,423 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/243b78e0c6f246efb34854140a24469d to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/243b78e0c6f246efb34854140a24469d 2024-11-28T07:24:14,424 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/36b469bca6ba4e4fb85af9ed8b052f95 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/36b469bca6ba4e4fb85af9ed8b052f95 2024-11-28T07:24:14,425 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b77b4d160c4546a7b60b6b8ba5bcab99 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/B/b77b4d160c4546a7b60b6b8ba5bcab99 2024-11-28T07:24:14,426 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/01dbd610ffb046e2a2344386dfdefe0b to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/01dbd610ffb046e2a2344386dfdefe0b 2024-11-28T07:24:14,427 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/17cb568ad7704351b3eed48e523b07fd to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/17cb568ad7704351b3eed48e523b07fd 2024-11-28T07:24:14,428 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7fd5f80b3b3745a6a4dcffe09c6649ac to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/7fd5f80b3b3745a6a4dcffe09c6649ac 2024-11-28T07:24:14,429 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/bbf013bc0fb74e1297d5876aafa621a4 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/C/bbf013bc0fb74e1297d5876aafa621a4 2024-11-28T07:24:14,431 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits/519.seqid to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2/recovered.edits/519.seqid 2024-11-28T07:24:14,431 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/default/TestAcidGuarantees/a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,431 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T07:24:14,432 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:24:14,432 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T07:24:14,434 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128011a08955c5445d18a15d561b481df43_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128011a08955c5445d18a15d561b481df43_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,435 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280322b66556d944e5a0fae661ea3917fa_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280322b66556d944e5a0fae661ea3917fa_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,436 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280a9d4a53ff2c4aa490d4e390fe316afe_a6b84436e6ee345d2d4f94cd524e48a2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280a9d4a53ff2c4aa490d4e390fe316afe_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,437 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112813049d711da24cb59433fcda831e0178_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112813049d711da24cb59433fcda831e0178_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,438 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281569f36d72554d3686f0db0031119aa8_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281569f36d72554d3686f0db0031119aa8_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,439 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128188795f79096459abdcb2763e97f8075_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128188795f79096459abdcb2763e97f8075_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,440 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112839c9db04227041f4b6c08e5fefaeffc5_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112839c9db04227041f4b6c08e5fefaeffc5_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,441 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112843b9686f86c44bf6919d6391bd309e3c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112843b9686f86c44bf6919d6391bd309e3c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,441 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ae447df78884a708142bd4f931b93be_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ae447df78884a708142bd4f931b93be_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,442 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ef0c599dc484aa1a799ce215c61f185_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285ef0c599dc484aa1a799ce215c61f185_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,443 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128605ddf7e38be40ee848f2040f482a86b_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128605ddf7e38be40ee848f2040f482a86b_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,444 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112865ee6723288b45e39e955ff2e613325c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112865ee6723288b45e39e955ff2e613325c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,445 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112877761030b19946f793c4cee7b5e8e254_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112877761030b19946f793c4cee7b5e8e254_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,446 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287cc49a50ceef489fa005948fb8a9108b_a6b84436e6ee345d2d4f94cd524e48a2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287cc49a50ceef489fa005948fb8a9108b_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,446 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112882958e7a9f5f4c10a20c9ac354bd2eec_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112882958e7a9f5f4c10a20c9ac354bd2eec_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,447 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112887f9237501e145538c187ae10e43dd59_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112887f9237501e145538c187ae10e43dd59_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,448 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288c6c27b18f5e4a2ebb4493a9c062f877_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288c6c27b18f5e4a2ebb4493a9c062f877_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,449 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288d8851f19e224fe5a4cb5037905a506e_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288d8851f19e224fe5a4cb5037905a506e_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,449 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a2f8ef2cd5ee472286c679060d67e8fc_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a2f8ef2cd5ee472286c679060d67e8fc_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,450 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a3c5d22b7dea416494142e76e9c2dd48_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128a3c5d22b7dea416494142e76e9c2dd48_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,451 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ad925c42b0d243f9bb154bdce738200c_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ad925c42b0d243f9bb154bdce738200c_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,452 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b550da4f62c54bffb58fe934095a2db2_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b550da4f62c54bffb58fe934095a2db2_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,453 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5bb6dfe6139438b8c20d13c6344981f_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b5bb6dfe6139438b8c20d13c6344981f_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,453 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bab5612127f04633be7997b345ad9bd7_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bab5612127f04633be7997b345ad9bd7_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,454 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c1b97235bbaf41328ae6019255fb2516_a6b84436e6ee345d2d4f94cd524e48a2 to 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c1b97235bbaf41328ae6019255fb2516_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,455 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d617d33d1e404ce4bc9ccba5e799e7a5_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d617d33d1e404ce4bc9ccba5e799e7a5_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,455 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f483562d43014e91acf13c9432d86738_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f483562d43014e91acf13c9432d86738_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,456 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f5fd9a43cbcb4557b626c3c0a8eb92ad_a6b84436e6ee345d2d4f94cd524e48a2 to hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f5fd9a43cbcb4557b626c3c0a8eb92ad_a6b84436e6ee345d2d4f94cd524e48a2 2024-11-28T07:24:14,457 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T07:24:14,459 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,460 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T07:24:14,462 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T07:24:14,462 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,462 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
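The DeleteTableProcedure trace above (pid=197) is the server-side half of dropping TestAcidGuarantees: the MOB store files are moved under the archive directory, the region row and table state are deleted from hbase:meta, and the table descriptor is removed. For orientation only, a drop like this is normally issued from a client through the public Admin API; the following is a minimal sketch under that assumption (connection setup and error handling are illustrative, not taken from the test source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table); // regions are closed/unassigned first
        admin.deleteTable(table);  // submits a DeleteTableProcedure on the master, as logged above
      }
    }
  }
}

The deleteTable call blocks until the master reports the procedure finished, which is what the "Checking to see if procedure is done pid=197" and "procId: 197 completed" lines further down correspond to.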
2024-11-28T07:24:14,463 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732778654462"}]},"ts":"9223372036854775807"} 2024-11-28T07:24:14,464 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T07:24:14,464 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a6b84436e6ee345d2d4f94cd524e48a2, NAME => 'TestAcidGuarantees,,1732778625585.a6b84436e6ee345d2d4f94cd524e48a2.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T07:24:14,464 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T07:24:14,464 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732778654464"}]},"ts":"9223372036854775807"} 2024-11-28T07:24:14,465 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T07:24:14,467 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T07:24:14,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 57 msec 2024-11-28T07:24:14,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41703 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-11-28T07:24:14,513 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 197 completed 2024-11-28T07:24:14,523 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=241 (was 242), OpenFileDescriptor=458 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=714 (was 650) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4176 (was 4310) 2024-11-28T07:24:14,523 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-28T07:24:14,523 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T07:24:14,523 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:56318 2024-11-28T07:24:14,523 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:14,523 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-28T07:24:14,524 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=633883069, stopped=false 2024-11-28T07:24:14,524 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=592d8b721726,41703,1732778473746 2024-11-28T07:24:14,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T07:24:14,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T07:24:14,526 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-28T07:24:14,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:24:14,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:24:14,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:14,526 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T07:24:14,526 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '592d8b721726,33143,1732778474488' ***** 2024-11-28T07:24:14,526 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-28T07:24:14,527 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T07:24:14,527 INFO [RS:0;592d8b721726:33143 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T07:24:14,527 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-28T07:24:14,527 INFO [RS:0;592d8b721726:33143 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T07:24:14,527 INFO [RS:0;592d8b721726:33143 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
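The shutdown sequence that begins here ("Shutting down minicluster" through "Minicluster is down" at the end of this log) is driven by HBaseTestingUtility, the harness that runs a one-master, one-region-server HBase on top of a mini DFS and mini ZooKeeper cluster. As a rough sketch of that lifecycle only (the class name and JUnit wiring below are illustrative assumptions, not the actual TestAcidGuaranteesWithBasicPolicy setup):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    // Starts mini ZooKeeper, a mini HDFS cluster, one HMaster and one HRegionServer.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    // Produces the "Shutting down minicluster" ... "Minicluster is down" lines seen in this log.
    TEST_UTIL.shutdownMiniCluster();
  }
}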
2024-11-28T07:24:14,527 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(3579): Received CLOSE for 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1224): stopping server 592d8b721726,33143,1732778474488 2024-11-28T07:24:14,528 DEBUG [RS:0;592d8b721726:33143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 29128ed80b74de0f148960cd93ceedac, disabling compactions & flushes 2024-11-28T07:24:14,528 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. after waiting 0 ms 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 
2024-11-28T07:24:14,528 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 29128ed80b74de0f148960cd93ceedac 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-28T07:24:14,528 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-28T07:24:14,528 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1603): Online Regions={29128ed80b74de0f148960cd93ceedac=hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac., 1588230740=hbase:meta,,1.1588230740} 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-28T07:24:14,528 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T07:24:14,528 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T07:24:14,528 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-28T07:24:14,532 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:24:14,545 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/.tmp/info/886c9c9b1f444aa5984d411f72e8192d is 45, key is default/info:d/1732778478092/Put/seqid=0 2024-11-28T07:24:14,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742663_1839 (size=5037) 2024-11-28T07:24:14,553 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/info/2782f8b296ad42ddb1b5af502c580500 is 143, key is hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac./info:regioninfo/1732778477987/Put/seqid=0 2024-11-28T07:24:14,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742664_1840 (size=7725) 2024-11-28T07:24:14,588 INFO [regionserver/592d8b721726:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T07:24:14,732 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:24:14,932 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 29128ed80b74de0f148960cd93ceedac 2024-11-28T07:24:14,948 INFO 
[RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/.tmp/info/886c9c9b1f444aa5984d411f72e8192d 2024-11-28T07:24:14,951 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/.tmp/info/886c9c9b1f444aa5984d411f72e8192d as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/info/886c9c9b1f444aa5984d411f72e8192d 2024-11-28T07:24:14,953 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/info/886c9c9b1f444aa5984d411f72e8192d, entries=2, sequenceid=6, filesize=4.9 K 2024-11-28T07:24:14,954 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 29128ed80b74de0f148960cd93ceedac in 426ms, sequenceid=6, compaction requested=false 2024-11-28T07:24:14,957 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/info/2782f8b296ad42ddb1b5af502c580500 2024-11-28T07:24:14,957 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/namespace/29128ed80b74de0f148960cd93ceedac/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T07:24:14,957 INFO [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 2024-11-28T07:24:14,957 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 29128ed80b74de0f148960cd93ceedac: 2024-11-28T07:24:14,958 DEBUG [RS_CLOSE_REGION-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732778477140.29128ed80b74de0f148960cd93ceedac. 
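The flush of 29128ed80b74de0f148960cd93ceedac above is triggered automatically while the region closes: the memstore is written to a temporary HFile under .tmp, committed into the info store, and the region close journal is recorded. The same write path can also be requested explicitly from a client; a hedged illustration using the public Admin API (everything except the table name is assumed for the example):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Asks the hosting region server(s) to write the table's memstores out as HFiles,
      // the same DefaultStoreFlusher path logged above for hbase:namespace.
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}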
2024-11-28T07:24:14,974 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/rep_barrier/e495ca4a48534f268aa2188e609ca627 is 102, key is TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683./rep_barrier:/1732778505232/DeleteFamily/seqid=0 2024-11-28T07:24:14,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742665_1841 (size=6025) 2024-11-28T07:24:15,133 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-28T07:24:15,333 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-28T07:24:15,374 INFO [regionserver/592d8b721726:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-28T07:24:15,374 INFO [regionserver/592d8b721726:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-28T07:24:15,377 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/rep_barrier/e495ca4a48534f268aa2188e609ca627 2024-11-28T07:24:15,395 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/table/77d17bd71bee4ff2a590ad4e2ade69bb is 96, key is TestAcidGuarantees,,1732778478346.af0c88dc7f2cd28f9a7271a3bc766683./table:/1732778505232/DeleteFamily/seqid=0 2024-11-28T07:24:15,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742666_1842 (size=5942) 2024-11-28T07:24:15,533 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-28T07:24:15,533 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-28T07:24:15,533 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-28T07:24:15,733 DEBUG [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-28T07:24:15,799 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/table/77d17bd71bee4ff2a590ad4e2ade69bb 2024-11-28T07:24:15,802 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/info/2782f8b296ad42ddb1b5af502c580500 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/info/2782f8b296ad42ddb1b5af502c580500 2024-11-28T07:24:15,804 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/info/2782f8b296ad42ddb1b5af502c580500, entries=22, sequenceid=93, filesize=7.5 K 2024-11-28T07:24:15,805 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/rep_barrier/e495ca4a48534f268aa2188e609ca627 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/rep_barrier/e495ca4a48534f268aa2188e609ca627 2024-11-28T07:24:15,807 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/rep_barrier/e495ca4a48534f268aa2188e609ca627, entries=6, sequenceid=93, filesize=5.9 K 2024-11-28T07:24:15,808 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/.tmp/table/77d17bd71bee4ff2a590ad4e2ade69bb as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/table/77d17bd71bee4ff2a590ad4e2ade69bb 2024-11-28T07:24:15,810 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/table/77d17bd71bee4ff2a590ad4e2ade69bb, entries=9, sequenceid=93, filesize=5.8 K 2024-11-28T07:24:15,811 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1283ms, sequenceid=93, compaction requested=false 2024-11-28T07:24:15,814 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-28T07:24:15,815 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T07:24:15,815 INFO [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-28T07:24:15,815 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-28T07:24:15,815 DEBUG [RS_CLOSE_META-regionserver/592d8b721726:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-28T07:24:15,934 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1250): stopping server 592d8b721726,33143,1732778474488; all regions closed. 
2024-11-28T07:24:15,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741834_1010 (size=26050) 2024-11-28T07:24:15,939 DEBUG [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/oldWALs 2024-11-28T07:24:15,939 INFO [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 592d8b721726%2C33143%2C1732778474488.meta:.meta(num 1732778476898) 2024-11-28T07:24:15,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741833_1009 (size=19330037) 2024-11-28T07:24:15,942 DEBUG [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/oldWALs 2024-11-28T07:24:15,942 INFO [RS:0;592d8b721726:33143 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 592d8b721726%2C33143%2C1732778474488:(num 1732778476468) 2024-11-28T07:24:15,942 DEBUG [RS:0;592d8b721726:33143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:15,942 INFO [RS:0;592d8b721726:33143 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T07:24:15,943 INFO [RS:0;592d8b721726:33143 {}] hbase.ChoreService(370): Chore service for: regionserver/592d8b721726:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-28T07:24:15,943 INFO [regionserver/592d8b721726:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-28T07:24:15,944 INFO [RS:0;592d8b721726:33143 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33143 2024-11-28T07:24:15,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T07:24:15,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/592d8b721726,33143,1732778474488 2024-11-28T07:24:15,947 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f74cc8f1d48@648cf531 rejected from java.util.concurrent.ThreadPoolExecutor@218e67c4[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-28T07:24:15,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [592d8b721726,33143,1732778474488] 2024-11-28T07:24:15,948 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 592d8b721726,33143,1732778474488; numProcessing=1 2024-11-28T07:24:15,950 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/592d8b721726,33143,1732778474488 already deleted, retry=false 2024-11-28T07:24:15,950 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 592d8b721726,33143,1732778474488 expired; onlineServers=0 2024-11-28T07:24:15,950 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '592d8b721726,41703,1732778473746' ***** 2024-11-28T07:24:15,950 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-28T07:24:15,950 DEBUG [M:0;592d8b721726:41703 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d66913, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=592d8b721726/172.17.0.2:0 2024-11-28T07:24:15,950 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegionServer(1224): stopping server 592d8b721726,41703,1732778473746 2024-11-28T07:24:15,950 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegionServer(1250): stopping server 592d8b721726,41703,1732778473746; all regions closed. 2024-11-28T07:24:15,950 DEBUG [M:0;592d8b721726:41703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T07:24:15,950 DEBUG [M:0;592d8b721726:41703 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-28T07:24:15,951 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
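Once the region server's WALs have been rolled into .../oldWALs and the dropped table's store files have been moved under .../archive (see the wal.AbstractFSWAL and backup.HFileArchiver lines above), both locations are plain HDFS directories and can be inspected with the standard Hadoop FileSystem API. A small sketch under that assumption, reusing the namenode address and test-data path that appear in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWalsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:44329"); // the mini DFS namenode used throughout this log
    try (FileSystem fs = FileSystem.get(conf)) {
      Path oldWals = new Path("/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/oldWALs");
      for (FileStatus status : fs.listStatus(oldWals)) {
        System.out.println(status.getPath() + "\t" + status.getLen() + " bytes");
      }
    }
  }
}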
2024-11-28T07:24:15,951 DEBUG [M:0;592d8b721726:41703 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-28T07:24:15,951 DEBUG [master/592d8b721726:0:becomeActiveMaster-HFileCleaner.small.0-1732778476200 {}] cleaner.HFileCleaner(306): Exit Thread[master/592d8b721726:0:becomeActiveMaster-HFileCleaner.small.0-1732778476200,5,FailOnTimeoutGroup] 2024-11-28T07:24:15,951 DEBUG [master/592d8b721726:0:becomeActiveMaster-HFileCleaner.large.0-1732778476199 {}] cleaner.HFileCleaner(306): Exit Thread[master/592d8b721726:0:becomeActiveMaster-HFileCleaner.large.0-1732778476199,5,FailOnTimeoutGroup] 2024-11-28T07:24:15,951 INFO [M:0;592d8b721726:41703 {}] hbase.ChoreService(370): Chore service for: master/592d8b721726:0 had [] on shutdown 2024-11-28T07:24:15,951 DEBUG [M:0;592d8b721726:41703 {}] master.HMaster(1733): Stopping service threads 2024-11-28T07:24:15,951 INFO [M:0;592d8b721726:41703 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-28T07:24:15,951 ERROR [M:0;592d8b721726:41703 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-28T07:24:15,952 INFO [M:0;592d8b721726:41703 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-28T07:24:15,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-28T07:24:15,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T07:24:15,952 DEBUG [M:0;592d8b721726:41703 {}] zookeeper.ZKUtil(347): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-28T07:24:15,952 WARN [M:0;592d8b721726:41703 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-28T07:24:15,952 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T07:24:15,952 INFO [M:0;592d8b721726:41703 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-28T07:24:15,952 INFO [M:0;592d8b721726:41703 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-28T07:24:15,953 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T07:24:15,953 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:24:15,953 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-28T07:24:15,953 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T07:24:15,953 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:24:15,953 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=861.98 KB heapSize=1.04 MB 2024-11-28T07:24:15,953 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-28T07:24:15,967 DEBUG [M:0;592d8b721726:41703 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c136a855f38943f787418fc2fa1e8fe4 is 82, key is hbase:meta,,1/info:regioninfo/1732778477031/Put/seqid=0 2024-11-28T07:24:15,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742667_1843 (size=5672) 2024-11-28T07:24:16,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T07:24:16,049 INFO [RS:0;592d8b721726:33143 {}] regionserver.HRegionServer(1307): Exiting; stopping=592d8b721726,33143,1732778474488; zookeeper connection closed. 2024-11-28T07:24:16,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33143-0x1003d00eeb50001, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T07:24:16,049 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4b84f476 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4b84f476 2024-11-28T07:24:16,050 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-28T07:24:16,371 INFO [M:0;592d8b721726:41703 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2526 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c136a855f38943f787418fc2fa1e8fe4 2024-11-28T07:24:16,390 DEBUG [M:0;592d8b721726:41703 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3edde1aa6fd743498a796e1e85a6f942 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\xA6/proc:d/1732778628601/Put/seqid=0 2024-11-28T07:24:16,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742668_1844 (size=48252) 2024-11-28T07:24:16,794 INFO [M:0;592d8b721726:41703 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=861.42 KB at sequenceid=2526 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3edde1aa6fd743498a796e1e85a6f942 2024-11-28T07:24:16,798 INFO [M:0;592d8b721726:41703 {}] 
regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3edde1aa6fd743498a796e1e85a6f942 2024-11-28T07:24:16,825 DEBUG [M:0;592d8b721726:41703 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e78973c40e4b4435b4d8a4aaba191a06 is 69, key is 592d8b721726,33143,1732778474488/rs:state/1732778476238/Put/seqid=0 2024-11-28T07:24:16,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073742669_1845 (size=5156) 2024-11-28T07:24:17,230 INFO [M:0;592d8b721726:41703 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2526 (bloomFilter=true), to=hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e78973c40e4b4435b4d8a4aaba191a06 2024-11-28T07:24:17,234 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c136a855f38943f787418fc2fa1e8fe4 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c136a855f38943f787418fc2fa1e8fe4 2024-11-28T07:24:17,238 INFO [M:0;592d8b721726:41703 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c136a855f38943f787418fc2fa1e8fe4, entries=8, sequenceid=2526, filesize=5.5 K 2024-11-28T07:24:17,239 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3edde1aa6fd743498a796e1e85a6f942 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3edde1aa6fd743498a796e1e85a6f942 2024-11-28T07:24:17,242 INFO [M:0;592d8b721726:41703 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3edde1aa6fd743498a796e1e85a6f942 2024-11-28T07:24:17,242 INFO [M:0;592d8b721726:41703 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3edde1aa6fd743498a796e1e85a6f942, entries=197, sequenceid=2526, filesize=47.1 K 2024-11-28T07:24:17,243 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e78973c40e4b4435b4d8a4aaba191a06 as hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e78973c40e4b4435b4d8a4aaba191a06 2024-11-28T07:24:17,245 INFO [M:0;592d8b721726:41703 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44329/user/jenkins/test-data/4c4adc0a-76e2-c002-afe2-2a918985fe4e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e78973c40e4b4435b4d8a4aaba191a06, entries=1, sequenceid=2526, filesize=5.0 K 2024-11-28T07:24:17,246 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegion(3040): Finished flush of dataSize ~861.98 KB/882668, heapSize ~1.04 MB/1090792, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1293ms, sequenceid=2526, compaction requested=false 2024-11-28T07:24:17,250 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T07:24:17,251 DEBUG [M:0;592d8b721726:41703 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T07:24:17,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35055 is added to blk_1073741830_1006 (size=1048169) 2024-11-28T07:24:17,253 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-28T07:24:17,253 INFO [M:0;592d8b721726:41703 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-28T07:24:17,254 INFO [M:0;592d8b721726:41703 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:41703 2024-11-28T07:24:17,256 DEBUG [M:0;592d8b721726:41703 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/592d8b721726,41703,1732778473746 already deleted, retry=false 2024-11-28T07:24:17,359 INFO [M:0;592d8b721726:41703 {}] regionserver.HRegionServer(1307): Exiting; stopping=592d8b721726,41703,1732778473746; zookeeper connection closed. 2024-11-28T07:24:17,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T07:24:17,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41703-0x1003d00eeb50000, quorum=127.0.0.1:56318, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T07:24:17,365 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T07:24:17,368 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T07:24:17,368 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T07:24:17,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T07:24:17,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.log.dir/,STOPPED} 2024-11-28T07:24:17,372 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-28T07:24:17,372 WARN [BP-2139004249-172.17.0.2-1732778470874 heartbeating to localhost/127.0.0.1:44329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-28T07:24:17,372 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-28T07:24:17,372 WARN [BP-2139004249-172.17.0.2-1732778470874 heartbeating to localhost/127.0.0.1:44329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2139004249-172.17.0.2-1732778470874 (Datanode Uuid 7160b532-67ad-4edd-8dd7-ef7aec28b2f2) service to localhost/127.0.0.1:44329 2024-11-28T07:24:17,375 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/dfs/data/data1/current/BP-2139004249-172.17.0.2-1732778470874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T07:24:17,376 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/cluster_b29cffd8-c664-5fae-adbc-5f9b55ebe6d3/dfs/data/data2/current/BP-2139004249-172.17.0.2-1732778470874 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T07:24:17,377 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-28T07:24:17,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T07:24:17,395 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T07:24:17,395 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T07:24:17,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T07:24:17,395 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5eb927dc-b33d-fd0f-8268-1c9502c7d0ae/hadoop.log.dir/,STOPPED} 2024-11-28T07:24:17,427 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-28T07:24:17,626 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down